etc.go 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916
  1. // Copyright 2020 The Libc Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style
  3. // license that can be found in the LICENSE file.
  4. package libc // import "modernc.org/libc"
  5. import (
  6. "fmt"
  7. "io"
  8. "os"
  9. "path/filepath"
  10. "runtime"
  11. "runtime/debug"
  12. "sort"
  13. "strconv"
  14. "strings"
  15. "sync"
  16. "sync/atomic"
  17. "syscall"
  18. "time"
  19. "unsafe"
  20. "modernc.org/libc/errno"
  21. "modernc.org/libc/signal"
  22. "modernc.org/libc/sys/types"
  23. )
const (
	// allocatorPageOverhead is the bookkeeping overhead, in bytes, that the
	// underlying allocator adds to every page (4 machine words).
	allocatorPageOverhead = 4 * unsafe.Sizeof(int(0))
	// stackHeaderSize is the size of the stackHeader record placed at the
	// start of every TLS stack page. Must be a multiple of 16 (checked in init).
	stackHeaderSize = unsafe.Sizeof(stackHeader{})
	// stackSegmentSize is the nominal TLS stack page size: 4 KiB minus the
	// allocator's per-page overhead, so a page fits a whole allocator page.
	stackSegmentSize = 1<<12 - allocatorPageOverhead
	// uintptrSize is the size of a pointer-sized word on this platform.
	uintptrSize = unsafe.Sizeof(uintptr(0))
)
var (
	// Covered records program counters reached via Cover (coverage aid).
	Covered = map[uintptr]struct{}{}
	// CoveredC records C-source locations reached via CoverC (coverage aid).
	CoveredC = map[string]struct{}{}
	fToken   uintptr // monotonically increasing token source; see token().
	tid      int32   // thread id source — presumably incremented per new TLS; TODO confirm (not used in this chunk).

	atExit   []func() // callbacks registered to run at process exit.
	atExitMu sync.Mutex

	signals   [signal.NSIG]uintptr // installed signal handlers, indexed by signal number.
	signalsMu sync.Mutex

	objectMu sync.Mutex
	// objects maps opaque tokens to Go values handed across the C boundary;
	// guarded by objectMu.
	objects = map[uintptr]interface{}{}

	tlsBalance int32 // memgrind: outstanding TLS instance count.

	_ = origin // silence unused warnings when build tags exclude callers.
	_ = trc
)
  45. func init() {
  46. if n := stackHeaderSize; n%16 != 0 {
  47. panic(fmt.Errorf("internal error: stackHeaderSize %v == %v (mod 16)", n, n%16))
  48. }
  49. }
  50. func origin(skip int) string {
  51. pc, fn, fl, _ := runtime.Caller(skip)
  52. f := runtime.FuncForPC(pc)
  53. var fns string
  54. if f != nil {
  55. fns = f.Name()
  56. if x := strings.LastIndex(fns, "."); x > 0 {
  57. fns = fns[x+1:]
  58. }
  59. }
  60. return fmt.Sprintf("%s:%d:%s", filepath.Base(fn), fl, fns)
  61. }
  62. func trc(s string, args ...interface{}) string { //TODO-
  63. switch {
  64. case s == "":
  65. s = fmt.Sprintf(strings.Repeat("%v ", len(args)), args...)
  66. default:
  67. s = fmt.Sprintf(s, args...)
  68. }
  69. r := fmt.Sprintf("%s: TRC %s", origin(2), s)
  70. fmt.Fprintf(os.Stdout, "%s\n", r)
  71. os.Stdout.Sync()
  72. return r
  73. }
  74. func todo(s string, args ...interface{}) string { //TODO-
  75. switch {
  76. case s == "":
  77. s = fmt.Sprintf(strings.Repeat("%v ", len(args)), args...)
  78. default:
  79. s = fmt.Sprintf(s, args...)
  80. }
  81. r := fmt.Sprintf("%s: TODOTODO %s", origin(2), s) //TODOOK
  82. if dmesgs {
  83. dmesg("%s", r)
  84. }
  85. fmt.Fprintf(os.Stdout, "%s\n", r)
  86. fmt.Fprintf(os.Stdout, "%s\n", debug.Stack()) //TODO-
  87. os.Stdout.Sync()
  88. os.Exit(1)
  89. panic("unrechable")
  90. }
// coverPCs is scratch space for Cover's runtime.Callers call.
var coverPCs [1]uintptr //TODO not concurrent safe

// Cover records the program counter of Cover's caller in Covered.
// Not safe for concurrent use (shared coverPCs scratch and plain map write).
func Cover() {
	runtime.Callers(2, coverPCs[:])
	Covered[coverPCs[0]] = struct{}{}
}
  96. func CoverReport(w io.Writer) error {
  97. var a []string
  98. pcs := make([]uintptr, 1)
  99. for pc := range Covered {
  100. pcs[0] = pc
  101. frame, _ := runtime.CallersFrames(pcs).Next()
  102. a = append(a, fmt.Sprintf("%s:%07d:%s", filepath.Base(frame.File), frame.Line, frame.Func.Name()))
  103. }
  104. sort.Strings(a)
  105. _, err := fmt.Fprintf(w, "%s\n", strings.Join(a, "\n"))
  106. return err
  107. }
// CoverC records the C-source location s in CoveredC.
// Not safe for concurrent use (plain map write).
func CoverC(s string) {
	CoveredC[s] = struct{}{}
}
  111. func CoverCReport(w io.Writer) error {
  112. var a []string
  113. for k := range CoveredC {
  114. a = append(a, k)
  115. }
  116. sort.Strings(a)
  117. _, err := fmt.Fprintf(w, "%s\n", strings.Join(a, "\n"))
  118. return err
  119. }
  120. func token() uintptr { return atomic.AddUintptr(&fToken, 1) }
  121. func addObject(o interface{}) uintptr {
  122. t := token()
  123. objectMu.Lock()
  124. objects[t] = o
  125. objectMu.Unlock()
  126. return t
  127. }
  128. func getObject(t uintptr) interface{} {
  129. objectMu.Lock()
  130. o := objects[t]
  131. if o == nil {
  132. panic(todo("", t))
  133. }
  134. objectMu.Unlock()
  135. return o
  136. }
  137. func removeObject(t uintptr) {
  138. objectMu.Lock()
  139. if _, ok := objects[t]; !ok {
  140. panic(todo(""))
  141. }
  142. delete(objects, t)
  143. objectMu.Unlock()
  144. }
  145. func (t *TLS) setErrno(err interface{}) {
  146. if memgrind {
  147. if atomic.SwapInt32(&t.reentryGuard, 1) != 0 {
  148. panic(todo("concurrent use of TLS instance %p", t))
  149. }
  150. defer func() {
  151. if atomic.SwapInt32(&t.reentryGuard, 0) != 1 {
  152. panic(todo("concurrent use of TLS instance %p", t))
  153. }
  154. }()
  155. }
  156. // if dmesgs {
  157. // dmesg("%v: %T(%v)\n%s", origin(1), err, err, debug.Stack())
  158. // }
  159. again:
  160. switch x := err.(type) {
  161. case int:
  162. *(*int32)(unsafe.Pointer(t.errnop)) = int32(x)
  163. case int32:
  164. *(*int32)(unsafe.Pointer(t.errnop)) = x
  165. case *os.PathError:
  166. err = x.Err
  167. goto again
  168. case syscall.Errno:
  169. *(*int32)(unsafe.Pointer(t.errnop)) = int32(x)
  170. case *os.SyscallError:
  171. err = x.Err
  172. goto again
  173. default:
  174. panic(todo("%T", x))
  175. }
  176. }
// Close frees the resources of t. It must be called exactly once per TLS
// instance; t is zeroed and unusable afterwards.
func (t *TLS) Close() {
	// Release the errno slot allocated on this TLS stack (an int32).
	t.Free(int(unsafe.Sizeof(int32(0))))
	if memgrind {
		// All stack pages must have been returned by matching Free calls.
		if t.stackHeaderBalance != 0 {
			panic(todo("non zero stack header balance: %d", t.stackHeaderBalance))
		}
		atomic.AddInt32(&tlsBalance, -1)
	}
	// Release per-thread pthread bookkeeping.
	t.pthreadData.close(t)
	*t = TLS{}
}
// Alloc allocates n bytes of thread-local storage. It must be paired with a
// call to t.Free(n), using the same n. The order matters. This is ok:
//
//	t.Alloc(11)
//	t.Alloc(22)
//	t.Free(22)
//	t.Free(11)
//
// This is not correct:
//
//	t.Alloc(11)
//	t.Alloc(22)
//	t.Free(11)
//	t.Free(22)
func (t *TLS) Alloc(n int) (r uintptr) {
	if memgrind {
		// Detect concurrent (mis)use of this single-threaded TLS instance.
		if atomic.SwapInt32(&t.reentryGuard, 1) != 0 {
			panic(todo("concurrent use of TLS instance %p", t))
		}
		defer func() {
			if atomic.SwapInt32(&t.reentryGuard, 0) != 1 {
				panic(todo("concurrent use of TLS instance %p", t))
			}
		}()
	}
	// Round the request up to a multiple of 16 to keep allocations aligned.
	n += 15
	n &^= 15
	// Fast path: the current stack page has room; bump-allocate.
	if t.stack.free >= n {
		r = t.stack.sp
		t.stack.free -= n
		t.stack.sp += uintptr(n)
		return r
	}
	// If we have a next (previously retained) stack page, try to reuse it.
	if nstack := t.stack.next; nstack != 0 {
		if (*stackHeader)(unsafe.Pointer(nstack)).free >= n {
			// Persist the current header into its page, switch to the next page.
			*(*stackHeader)(unsafe.Pointer(t.stack.page)) = t.stack
			t.stack = *(*stackHeader)(unsafe.Pointer(nstack))
			r = t.stack.sp
			t.stack.free -= n
			t.stack.sp += uintptr(n)
			return r
		}
		// The retained page(s) are too small for this request: free the whole
		// chain of next pages, then fall through to allocate a fresh one.
		nstack := *(*stackHeader)(unsafe.Pointer(t.stack.next))
		for ; ; nstack = *(*stackHeader)(unsafe.Pointer(nstack.next)) {
			if memgrind {
				if atomic.AddInt32(&t.stackHeaderBalance, -1) < 0 {
					panic(todo("negative stack header balance"))
				}
			}
			Xfree(t, nstack.page)
			if nstack.next == 0 {
				break
			}
		}
		t.stack.next = 0
	}
	// Persist the current header into its page before linking a new one.
	if t.stack.page != 0 {
		*(*stackHeader)(unsafe.Pointer(t.stack.page)) = t.stack
	}
	// Size the new page: request + header, rounded up to whole segments.
	rq := n + int(stackHeaderSize)
	if rq%int(stackSegmentSize) != 0 {
		rq -= rq % int(stackSegmentSize)
		rq += int(stackSegmentSize)
	}
	t.stack.free = rq - int(stackHeaderSize)
	t.stack.prev = t.stack.page

	rq += 15
	rq &^= 15
	t.stack.page = Xmalloc(t, types.Size_t(rq))
	if t.stack.page == 0 {
		panic("OOM")
	}
	if memgrind {
		atomic.AddInt32(&t.stackHeaderBalance, 1)
	}
	// Allocate the requested n bytes right after the header.
	t.stack.sp = t.stack.page + stackHeaderSize

	r = t.stack.sp
	t.stack.free -= n
	t.stack.sp += uintptr(n)
	// Link the previous page forward to the new one.
	if t.stack.prev != 0 {
		(*stackHeader)(unsafe.Pointer(t.stack.prev)).next = t.stack.page
	}

	return r
}
// stackFrameKeepalive is the number of empty stack pages Free keeps alive
// (instead of releasing immediately) to avoid malloc/free churn when the
// TLS stack repeatedly shrinks and regrows.
const stackFrameKeepalive = 2
// Free deallocates n bytes of thread-local storage. See TLS.Alloc for details
// on correct usage.
func (t *TLS) Free(n int) {
	if memgrind {
		// Detect concurrent (mis)use of this single-threaded TLS instance.
		if atomic.SwapInt32(&t.reentryGuard, 1) != 0 {
			panic(todo("concurrent use of TLS instance %p", t))
		}
		defer func() {
			if atomic.SwapInt32(&t.reentryGuard, 0) != 1 {
				panic(todo("concurrent use of TLS instance %p", t))
			}
		}()
	}
	// Round up exactly as Alloc did so the accounting matches.
	n += 15
	n &^= 15
	t.stack.free += n
	t.stack.sp -= uintptr(n)
	// If the current page is not yet empty, nothing more to do.
	if t.stack.sp != t.stack.page+stackHeaderSize {
		return
	}

	nstack := t.stack
	// If we are the first page, just free the entire chain.
	if t.stack.prev == 0 {
		for ; ; nstack = *(*stackHeader)(unsafe.Pointer(nstack.next)) {
			if memgrind {
				if atomic.AddInt32(&t.stackHeaderBalance, -1) < 0 {
					panic(todo("negative stack header balance"))
				}
			}
			Xfree(t, nstack.page)
			if nstack.next == 0 {
				break
			}
		}
		t.stack = stackHeader{}
		return
	}

	// Look whether we are within the last stackFrameKeepalive retained pages;
	// if so, keep them alive and just step back to the previous page.
	for i := 0; i < stackFrameKeepalive; i++ {
		if nstack.next == 0 {
			*((*stackHeader)(unsafe.Pointer(t.stack.page))) = t.stack
			t.stack = *(*stackHeader)(unsafe.Pointer(t.stack.prev))
			return
		}
		nstack = *(*stackHeader)(unsafe.Pointer(nstack.next))
	}

	// Otherwise free only the last page of the chain and step back.
	if memgrind {
		if atomic.AddInt32(&t.stackHeaderBalance, -1) < 0 {
			panic(todo("negative stack header balance"))
		}
	}
	Xfree(t, nstack.page)
	(*stackHeader)(unsafe.Pointer(nstack.prev)).next = 0
	*(*stackHeader)(unsafe.Pointer(t.stack.page)) = t.stack
	t.stack = *(*stackHeader)(unsafe.Pointer(t.stack.prev))
}
// stackHeader is the bookkeeping record stored at the start of every TLS
// stack page. The in-memory copy t.stack is authoritative for the current
// page; it is written back into the page when switching pages.
type stackHeader struct {
	free int     // bytes left in page
	page uintptr // stack page
	prev uintptr // prev stack page = prev stack header
	next uintptr // next stack page = next stack header
	sp   uintptr // next allocation address
	_    stackHeaderPadding
}
  342. func cString(t *TLS, s string) uintptr { //TODO-
  343. n := len(s)
  344. p := Xmalloc(t, types.Size_t(n)+1)
  345. if p == 0 {
  346. panic("OOM")
  347. }
  348. copy((*RawMem)(unsafe.Pointer(p))[:n:n], s)
  349. *(*byte)(unsafe.Pointer(p + uintptr(n))) = 0
  350. return p
  351. }
  352. // VaList fills a varargs list at p with args and returns p. The list must
  353. // have been allocated by caller and it must not be in Go managed memory, ie.
  354. // it must be pinned. Caller is responsible for freeing the list.
  355. //
  356. // Individual arguments must be one of int, uint, int32, uint32, int64, uint64,
  357. // float64, uintptr or Intptr. Other types will panic.
  358. //
  359. // This function supports code generated by ccgo/v3. For manually constructed
  360. // var args it's recommended to use the NewVaList function instead.
  361. //
  362. // Note: The C translated to Go varargs ABI alignment for all types is 8 on all
  363. // architectures.
  364. func VaList(p uintptr, args ...interface{}) (r uintptr) {
  365. if p&7 != 0 {
  366. panic("internal error")
  367. }
  368. r = p
  369. for _, v := range args {
  370. switch x := v.(type) {
  371. case int:
  372. *(*int64)(unsafe.Pointer(p)) = int64(x)
  373. case int32:
  374. *(*int64)(unsafe.Pointer(p)) = int64(x)
  375. case int64:
  376. *(*int64)(unsafe.Pointer(p)) = x
  377. case uint:
  378. *(*uint64)(unsafe.Pointer(p)) = uint64(x)
  379. case uint16:
  380. *(*uint64)(unsafe.Pointer(p)) = uint64(x)
  381. case uint32:
  382. *(*uint64)(unsafe.Pointer(p)) = uint64(x)
  383. case uint64:
  384. *(*uint64)(unsafe.Pointer(p)) = x
  385. case float64:
  386. *(*float64)(unsafe.Pointer(p)) = x
  387. case uintptr:
  388. *(*uintptr)(unsafe.Pointer(p)) = x
  389. default:
  390. panic(todo("invalid VaList argument type: %T", x))
  391. }
  392. p += 8
  393. }
  394. return r
  395. }
  396. // NewVaListN returns a newly allocated va_list for n items. The caller of
  397. // NewVaListN is responsible for freeing the va_list.
  398. func NewVaListN(n int) (va_list uintptr) {
  399. return Xmalloc(nil, types.Size_t(8*n))
  400. }
  401. // NewVaList is like VaList but automatically allocates the correct amount of
  402. // memory for all of the items in args.
  403. //
  404. // The va_list return value is used to pass the constructed var args to var
  405. // args accepting functions. The caller of NewVaList is responsible for freeing
  406. // the va_list.
  407. func NewVaList(args ...interface{}) (va_list uintptr) {
  408. return VaList(NewVaListN(len(args)), args...)
  409. }
  410. func VaInt32(app *uintptr) int32 {
  411. ap := *(*uintptr)(unsafe.Pointer(app))
  412. if ap == 0 {
  413. return 0
  414. }
  415. ap = roundup(ap, 8)
  416. v := int32(*(*int64)(unsafe.Pointer(ap)))
  417. ap += 8
  418. *(*uintptr)(unsafe.Pointer(app)) = ap
  419. return v
  420. }
  421. func VaUint32(app *uintptr) uint32 {
  422. ap := *(*uintptr)(unsafe.Pointer(app))
  423. if ap == 0 {
  424. return 0
  425. }
  426. ap = roundup(ap, 8)
  427. v := uint32(*(*uint64)(unsafe.Pointer(ap)))
  428. ap += 8
  429. *(*uintptr)(unsafe.Pointer(app)) = ap
  430. return v
  431. }
  432. func VaInt64(app *uintptr) int64 {
  433. ap := *(*uintptr)(unsafe.Pointer(app))
  434. if ap == 0 {
  435. return 0
  436. }
  437. ap = roundup(ap, 8)
  438. v := *(*int64)(unsafe.Pointer(ap))
  439. ap += 8
  440. *(*uintptr)(unsafe.Pointer(app)) = ap
  441. return v
  442. }
  443. func VaUint64(app *uintptr) uint64 {
  444. ap := *(*uintptr)(unsafe.Pointer(app))
  445. if ap == 0 {
  446. return 0
  447. }
  448. ap = roundup(ap, 8)
  449. v := *(*uint64)(unsafe.Pointer(ap))
  450. ap += 8
  451. *(*uintptr)(unsafe.Pointer(app)) = ap
  452. return v
  453. }
  454. func VaFloat32(app *uintptr) float32 {
  455. ap := *(*uintptr)(unsafe.Pointer(app))
  456. if ap == 0 {
  457. return 0
  458. }
  459. ap = roundup(ap, 8)
  460. v := *(*float64)(unsafe.Pointer(ap))
  461. ap += 8
  462. *(*uintptr)(unsafe.Pointer(app)) = ap
  463. return float32(v)
  464. }
  465. func VaFloat64(app *uintptr) float64 {
  466. ap := *(*uintptr)(unsafe.Pointer(app))
  467. if ap == 0 {
  468. return 0
  469. }
  470. ap = roundup(ap, 8)
  471. v := *(*float64)(unsafe.Pointer(ap))
  472. ap += 8
  473. *(*uintptr)(unsafe.Pointer(app)) = ap
  474. return v
  475. }
  476. func VaUintptr(app *uintptr) uintptr {
  477. ap := *(*uintptr)(unsafe.Pointer(app))
  478. if ap == 0 {
  479. return 0
  480. }
  481. ap = roundup(ap, 8)
  482. v := *(*uintptr)(unsafe.Pointer(ap))
  483. ap += 8
  484. *(*uintptr)(unsafe.Pointer(app)) = ap
  485. return v
  486. }
  487. func roundup(n, to uintptr) uintptr {
  488. if r := n % to; r != 0 {
  489. return n + to - r
  490. }
  491. return n
  492. }
  493. func GoString(s uintptr) string {
  494. if s == 0 {
  495. return ""
  496. }
  497. var buf []byte
  498. for {
  499. b := *(*byte)(unsafe.Pointer(s))
  500. if b == 0 {
  501. return string(buf)
  502. }
  503. buf = append(buf, b)
  504. s++
  505. }
  506. }
  507. // GoBytes returns a byte slice from a C char* having length len bytes.
  508. func GoBytes(s uintptr, len int) []byte {
  509. if len == 0 {
  510. return nil
  511. }
  512. return (*RawMem)(unsafe.Pointer(s))[:len:len]
  513. }
  514. func Bool32(b bool) int32 {
  515. if b {
  516. return 1
  517. }
  518. return 0
  519. }
  520. func Bool64(b bool) int64 {
  521. if b {
  522. return 1
  523. }
  524. return 0
  525. }
// sorter adapts a C array plus a C comparison callback to sort.Interface
// (used to implement qsort-style sorting over raw memory).
type sorter struct {
	len  int     // number of elements
	base uintptr // address of element 0
	sz   uintptr // size of one element in bytes
	f    func(*TLS, uintptr, uintptr) int32 // C-style comparator: <0, 0, >0
	t    *TLS
}
// Len implements sort.Interface.
func (s *sorter) Len() int { return s.len }

// Less implements sort.Interface by invoking the C comparator on the
// addresses of elements i and j.
func (s *sorter) Less(i, j int) bool {
	return s.f(s.t, s.base+uintptr(i)*s.sz, s.base+uintptr(j)*s.sz) < 0
}
  537. func (s *sorter) Swap(i, j int) {
  538. p := uintptr(s.base + uintptr(i)*s.sz)
  539. q := uintptr(s.base + uintptr(j)*s.sz)
  540. for i := 0; i < int(s.sz); i++ {
  541. *(*byte)(unsafe.Pointer(p)), *(*byte)(unsafe.Pointer(q)) = *(*byte)(unsafe.Pointer(q)), *(*byte)(unsafe.Pointer(p))
  542. p++
  543. q++
  544. }
  545. }
  546. func CString(s string) (uintptr, error) {
  547. n := len(s)
  548. p := Xmalloc(nil, types.Size_t(n)+1)
  549. if p == 0 {
  550. return 0, fmt.Errorf("CString: cannot allocate %d bytes", n+1)
  551. }
  552. copy((*RawMem)(unsafe.Pointer(p))[:n:n], s)
  553. *(*byte)(unsafe.Pointer(p + uintptr(n))) = 0
  554. return p, nil
  555. }
  556. func GetEnviron() (r []string) {
  557. for p := Environ(); ; p += unsafe.Sizeof(p) {
  558. q := *(*uintptr)(unsafe.Pointer(p))
  559. if q == 0 {
  560. return r
  561. }
  562. r = append(r, GoString(q))
  563. }
  564. }
  565. func strToUint64(t *TLS, s uintptr, base int32) (seenDigits, neg bool, next uintptr, n uint64, err int32) {
  566. var c byte
  567. out:
  568. for {
  569. c = *(*byte)(unsafe.Pointer(s))
  570. switch c {
  571. case ' ', '\t', '\n', '\r', '\v', '\f':
  572. s++
  573. case '+':
  574. s++
  575. break out
  576. case '-':
  577. s++
  578. neg = true
  579. break out
  580. default:
  581. break out
  582. }
  583. }
  584. for {
  585. c = *(*byte)(unsafe.Pointer(s))
  586. var digit uint64
  587. switch base {
  588. case 10:
  589. switch {
  590. case c >= '0' && c <= '9':
  591. seenDigits = true
  592. digit = uint64(c) - '0'
  593. default:
  594. return seenDigits, neg, s, n, 0
  595. }
  596. case 16:
  597. if c >= 'A' && c <= 'F' {
  598. c = c + ('a' - 'A')
  599. }
  600. switch {
  601. case c >= '0' && c <= '9':
  602. seenDigits = true
  603. digit = uint64(c) - '0'
  604. case c >= 'a' && c <= 'f':
  605. seenDigits = true
  606. digit = uint64(c) - 'a' + 10
  607. default:
  608. return seenDigits, neg, s, n, 0
  609. }
  610. default:
  611. panic(todo("", base))
  612. }
  613. n0 := n
  614. n = uint64(base)*n + digit
  615. if n < n0 { // overflow
  616. return seenDigits, neg, s, n0, errno.ERANGE
  617. }
  618. s++
  619. }
  620. }
// strToFloatt64 parses a floating point number from the C string at s,
// after skipping leading whitespace and an optional sign. The digits are
// collected into b and the deferred closure converts them with
// strconv.ParseFloat(..., bits), applying the sign; every return path below
// therefore produces n via that defer. Unparseable collected text panics.
// NOTE(review): the errno result is never set to a non-zero value in this
// implementation — out-of-range input panics instead of reporting ERANGE.
func strToFloatt64(t *TLS, s uintptr, bits int) (n float64, errno int32) {
	var b []byte
	var neg bool

	defer func() {
		var err error
		if n, err = strconv.ParseFloat(string(b), bits); err != nil {
			panic(todo(""))
		}
		if neg {
			n = -n
		}
	}()

	// Skip whitespace and consume at most one leading sign.
	var c byte
out:
	for {
		c = *(*byte)(unsafe.Pointer(s))
		switch c {
		case ' ', '\t', '\n', '\r', '\v', '\f':
			s++
		case '+':
			s++
			break out
		case '-':
			s++
			neg = true
			break out
		default:
			break out
		}
	}
	// Integer part.
	for {
		c = *(*byte)(unsafe.Pointer(s))
		switch {
		case c >= '0' && c <= '9':
			b = append(b, c)
		case c == '.':
			// Fractional part.
			b = append(b, c)
			s++
			for {
				c = *(*byte)(unsafe.Pointer(s))
				switch {
				case c >= '0' && c <= '9':
					b = append(b, c)
				case c == 'e' || c == 'E':
					// Exponent; only the signed form ([eE][+-]digits) is
					// handled — an unsigned exponent panics below.
					b = append(b, c)
					s++
					for {
						c = *(*byte)(unsafe.Pointer(s))
						switch {
						case c == '+' || c == '-':
							b = append(b, c)
							s++
							// Exponent digits; first non-digit ends the number.
							for {
								c = *(*byte)(unsafe.Pointer(s))
								switch {
								case c >= '0' && c <= '9':
									b = append(b, c)
								default:
									return
								}
								s++
							}
						default:
							panic(todo("%q %q", b, string(c)))
						}
					}
				default:
					return
				}
				s++
			}
		default:
			// First non-digit before any '.' — includes plain integers with
			// no fraction, which panic here rather than returning. TODO confirm
			// callers only pass inputs containing a '.'.
			panic(todo("%q %q", b, string(c)))
		}
		s++
	}
}
  698. func parseZone(s string) (name string, off int) {
  699. _, name, off, _ = parseZoneOffset(s, false)
  700. return name, off
  701. }
// parseZoneOffset splits a TZ-style string into a zone name (letters, '_',
// '/'; at least 3 bytes) and a following [+|-]hh[:mm[:ss]] offset. It returns
// the remaining tail, the name, the offset in seconds, and whether an offset
// was present. With offOpt true the offset may be absent: the tail is
// returned as-is when the byte after the name cannot start an offset.
// Panics (via todo) when the name is shorter than 3 bytes.
func parseZoneOffset(s string, offOpt bool) (string, string, int, bool) {
	s0 := s
	name := s
	for len(s) != 0 {
		switch c := s[0]; {
		case c >= 'A' && c <= 'Z', c >= 'a' && c <= 'z', c == '_', c == '/':
			s = s[1:]
		default:
			// First byte past the name: trim name to the consumed prefix.
			name = name[:len(name)-len(s)]
			if len(name) < 3 {
				panic(todo("%q", s0))
			}
			if offOpt {
				// NOTE(review): len(s) is non-zero here (loop condition), so
				// this branch appears unreachable — TODO confirm.
				if len(s) == 0 {
					return "", name, 0, false
				}
				if c := s[0]; (c < '0' || c > '9') && c != '+' && c != '-' {
					return s, name, 0, false
				}
			}
			s, off := parseOffset(s)
			return s, name, off, true
		}
	}
	// The whole string is a name with no offset.
	return "", s0, 0, true
}
  728. // [+|-]hh[:mm[:ss]]
  729. func parseOffset(s string) (string, int) {
  730. if len(s) == 0 {
  731. panic(todo(""))
  732. }
  733. k := 1
  734. switch s[0] {
  735. case '+':
  736. // nop
  737. s = s[1:]
  738. case '-':
  739. k = -1
  740. s = s[1:]
  741. }
  742. s, hh, ok := parseUint(s)
  743. if !ok {
  744. panic(todo(""))
  745. }
  746. n := hh * 3600
  747. if len(s) == 0 || s[0] != ':' {
  748. return s, k * n
  749. }
  750. s = s[1:] // ':'
  751. if len(s) == 0 {
  752. panic(todo(""))
  753. }
  754. s, mm, ok := parseUint(s)
  755. if !ok {
  756. panic(todo(""))
  757. }
  758. n += mm * 60
  759. if len(s) == 0 || s[0] != ':' {
  760. return s, k * n
  761. }
  762. s = s[1:] // ':'
  763. if len(s) == 0 {
  764. panic(todo(""))
  765. }
  766. s, ss, _ := parseUint(s)
  767. return s, k * (n + ss)
  768. }
  769. func parseUint(s string) (string, int, bool) {
  770. var ok bool
  771. var r int
  772. for len(s) != 0 {
  773. switch c := s[0]; {
  774. case c >= '0' && c <= '9':
  775. ok = true
  776. r0 := r
  777. r = 10*r + int(c) - '0'
  778. if r < r0 {
  779. panic(todo(""))
  780. }
  781. s = s[1:]
  782. default:
  783. return s, r, ok
  784. }
  785. }
  786. return s, r, ok
  787. }
  788. // https://stackoverflow.com/a/53052382
  789. //
  790. // isTimeDST returns true if time t occurs within daylight saving time
  791. // for its time zone.
  792. func isTimeDST(t time.Time) bool {
  793. // If the most recent (within the last year) clock change
  794. // was forward then assume the change was from std to dst.
  795. hh, mm, _ := t.UTC().Clock()
  796. tClock := hh*60 + mm
  797. for m := -1; m > -12; m-- {
  798. // assume dst lasts for at least one month
  799. hh, mm, _ := t.AddDate(0, m, 0).UTC().Clock()
  800. clock := hh*60 + mm
  801. if clock != tClock {
  802. return clock > tClock
  803. }
  804. }
  805. // assume no dst
  806. return false
  807. }