source: code/trunk/vendor/modernc.org/libc/etc.go@ 823

Last change on this file since 823 was 822, checked in by yakumo.izuru, 22 months ago

Prefer immortal.run over runit and rc.d, use vendored modules
for convenience.

Signed-off-by: Izuru Yakumo <yakumo.izuru@…>

File size: 18.1 KB
// Copyright 2020 The Libc Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package libc // import "modernc.org/libc"

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"runtime"
	"runtime/debug"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"time"
	"unsafe"

	"modernc.org/libc/errno"
	"modernc.org/libc/signal"
	"modernc.org/libc/sys/types"
)

const (
	allocatorPageOverhead = 4 * unsafe.Sizeof(int(0))
	stackHeaderSize       = unsafe.Sizeof(stackHeader{})
	stackSegmentSize      = 1<<12 - allocatorPageOverhead
	uintptrSize           = unsafe.Sizeof(uintptr(0))
)

var (
	Covered  = map[uintptr]struct{}{}
	CoveredC = map[string]struct{}{}
	fToken   uintptr
	tid      int32

	atExit   []func()
	atExitMu sync.Mutex

	signals   [signal.NSIG]uintptr
	signalsMu sync.Mutex

	objectMu sync.Mutex
	objects  = map[uintptr]interface{}{}

	tlsBalance int32

	_ = origin
	_ = trc
)

func init() {
	if n := stackHeaderSize; n%16 != 0 {
		panic(fmt.Errorf("internal error: stackHeaderSize %v == %v (mod 16)", n, n%16))
	}
}

func origin(skip int) string {
	pc, fn, fl, _ := runtime.Caller(skip)
	f := runtime.FuncForPC(pc)
	var fns string
	if f != nil {
		fns = f.Name()
		if x := strings.LastIndex(fns, "."); x > 0 {
			fns = fns[x+1:]
		}
	}
	return fmt.Sprintf("%s:%d:%s", filepath.Base(fn), fl, fns)
}

func trc(s string, args ...interface{}) string { //TODO-
	switch {
	case s == "":
		s = fmt.Sprintf(strings.Repeat("%v ", len(args)), args...)
	default:
		s = fmt.Sprintf(s, args...)
	}
	r := fmt.Sprintf("%s: TRC %s", origin(2), s)
	fmt.Fprintf(os.Stdout, "%s\n", r)
	os.Stdout.Sync()
	return r
}

func todo(s string, args ...interface{}) string { //TODO-
	switch {
	case s == "":
		s = fmt.Sprintf(strings.Repeat("%v ", len(args)), args...)
	default:
		s = fmt.Sprintf(s, args...)
	}
	r := fmt.Sprintf("%s: TODOTODO %s", origin(2), s) //TODOOK
	if dmesgs {
		dmesg("%s", r)
	}
	fmt.Fprintf(os.Stdout, "%s\n", r)
	fmt.Fprintf(os.Stdout, "%s\n", debug.Stack()) //TODO-
	os.Stdout.Sync()
	os.Exit(1)
	panic("unreachable")
}

var coverPCs [1]uintptr //TODO not concurrent safe

func Cover() {
	runtime.Callers(2, coverPCs[:])
	Covered[coverPCs[0]] = struct{}{}
}

func CoverReport(w io.Writer) error {
	var a []string
	pcs := make([]uintptr, 1)
	for pc := range Covered {
		pcs[0] = pc
		frame, _ := runtime.CallersFrames(pcs).Next()
		a = append(a, fmt.Sprintf("%s:%07d:%s", filepath.Base(frame.File), frame.Line, frame.Func.Name()))
	}
	sort.Strings(a)
	_, err := fmt.Fprintf(w, "%s\n", strings.Join(a, "\n"))
	return err
}
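
// A minimal usage sketch (editor's illustration, not part of the original
// file): call Cover at the top of functions of interest, then write the
// collected call sites with CoverReport.
//
//	func Xfoo(t *TLS) { // hypothetical instrumented function
//		Cover()
//		// ...
//	}
//
//	// after the workload:
//	_ = CoverReport(os.Stdout) // one "file:line:func" entry per covered site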

func CoverC(s string) {
	CoveredC[s] = struct{}{}
}

func CoverCReport(w io.Writer) error {
	var a []string
	for k := range CoveredC {
		a = append(a, k)
	}
	sort.Strings(a)
	_, err := fmt.Fprintf(w, "%s\n", strings.Join(a, "\n"))
	return err
}

func token() uintptr { return atomic.AddUintptr(&fToken, 1) }

// addObject stores o in the object registry and returns an opaque token that
// can later be resolved back to o with getObject.
func addObject(o interface{}) uintptr {
	t := token()
	objectMu.Lock()
	objects[t] = o
	objectMu.Unlock()
	return t
}

// getObject returns the object registered under token t. It panics if t is
// not registered.
func getObject(t uintptr) interface{} {
	objectMu.Lock()
	o := objects[t]
	if o == nil {
		panic(todo("", t))
	}

	objectMu.Unlock()
	return o
}

// removeObject unregisters token t. It panics if t is not registered.
func removeObject(t uintptr) {
	objectMu.Lock()
	if _, ok := objects[t]; !ok {
		panic(todo(""))
	}

	delete(objects, t)
	objectMu.Unlock()
}
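
// A minimal usage sketch (editor's illustration, not part of the original
// file): the token is a plain uintptr, so it can be handed to C code where a
// Go pointer could not, and resolved back later.
//
//	tok := addObject(os.Stdin)     // register any Go value
//	f := getObject(tok).(*os.File) // resolve and type-assert it back
//	removeObject(tok)              // unregister when done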

// setErrno stores err, reduced to a plain errno value, at t.errnop.
func (t *TLS) setErrno(err interface{}) {
	if memgrind {
		if atomic.SwapInt32(&t.reentryGuard, 1) != 0 {
			panic(todo("concurrent use of TLS instance %p", t))
		}

		defer func() {
			if atomic.SwapInt32(&t.reentryGuard, 0) != 1 {
				panic(todo("concurrent use of TLS instance %p", t))
			}
		}()
	}
	// if dmesgs {
	// 	dmesg("%v: %T(%v)\n%s", origin(1), err, err, debug.Stack())
	// }
again:
	switch x := err.(type) {
	case int:
		*(*int32)(unsafe.Pointer(t.errnop)) = int32(x)
	case int32:
		*(*int32)(unsafe.Pointer(t.errnop)) = x
	case *os.PathError:
		err = x.Err
		goto again
	case syscall.Errno:
		*(*int32)(unsafe.Pointer(t.errnop)) = int32(x)
	case *os.SyscallError:
		err = x.Err
		goto again
	default:
		panic(todo("%T", x))
	}
}
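
// For example (editor's illustration): a *os.PathError wrapping
// syscall.ENOENT takes the *os.PathError case once, loops via the goto, and
// stores int32(syscall.ENOENT) at t.errnop.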

// Close frees the resources of t.
func (t *TLS) Close() {
	t.Free(int(unsafe.Sizeof(int32(0))))
	if memgrind {
		if t.stackHeaderBalance != 0 {
			panic(todo("non zero stack header balance: %d", t.stackHeaderBalance))
		}

		atomic.AddInt32(&tlsBalance, -1)
	}
	t.pthreadData.close(t)
	*t = TLS{}
}

// Alloc allocates n bytes of thread-local storage. It must be paired with a
// call to t.Free(n), using the same n. The order matters. This is ok:
//
//	t.Alloc(11)
//	t.Alloc(22)
//	t.Free(22)
//	t.Free(11)
//
// This is not correct:
//
//	t.Alloc(11)
//	t.Alloc(22)
//	t.Free(11)
//	t.Free(22)
func (t *TLS) Alloc(n int) (r uintptr) {
	if memgrind {
		if atomic.SwapInt32(&t.reentryGuard, 1) != 0 {
			panic(todo("concurrent use of TLS instance %p", t))
		}

		defer func() {
			if atomic.SwapInt32(&t.reentryGuard, 0) != 1 {
				panic(todo("concurrent use of TLS instance %p", t))
			}
		}()
	}
	n += 15
	n &^= 15
	if t.stack.free >= n {
		r = t.stack.sp
		t.stack.free -= n
		t.stack.sp += uintptr(n)
		return r
	}

	// If we have a next stack page, try to reuse it.
	if nstack := t.stack.next; nstack != 0 {
		if (*stackHeader)(unsafe.Pointer(nstack)).free >= n {
			*(*stackHeader)(unsafe.Pointer(t.stack.page)) = t.stack
			t.stack = *(*stackHeader)(unsafe.Pointer(nstack))
			r = t.stack.sp
			t.stack.free -= n
			t.stack.sp += uintptr(n)
			return r
		}

		// Otherwise free the whole chain of next pages.
		nstack := *(*stackHeader)(unsafe.Pointer(t.stack.next))
		for ; ; nstack = *(*stackHeader)(unsafe.Pointer(nstack.next)) {
			if memgrind {
				if atomic.AddInt32(&t.stackHeaderBalance, -1) < 0 {
					panic(todo("negative stack header balance"))
				}
			}
			Xfree(t, nstack.page)
			if nstack.next == 0 {
				break
			}
		}
		t.stack.next = 0
	}

	if t.stack.page != 0 {
		*(*stackHeader)(unsafe.Pointer(t.stack.page)) = t.stack
	}

	rq := n + int(stackHeaderSize)
	if rq%int(stackSegmentSize) != 0 {
		rq -= rq % int(stackSegmentSize)
		rq += int(stackSegmentSize)
	}
	t.stack.free = rq - int(stackHeaderSize)
	t.stack.prev = t.stack.page

	rq += 15
	rq &^= 15
	t.stack.page = Xmalloc(t, types.Size_t(rq))
	if t.stack.page == 0 {
		panic("OOM")
	}

	if memgrind {
		atomic.AddInt32(&t.stackHeaderBalance, 1)
	}
	t.stack.sp = t.stack.page + stackHeaderSize

	r = t.stack.sp
	t.stack.free -= n
	t.stack.sp += uintptr(n)
	if t.stack.prev != 0 {
		(*stackHeader)(unsafe.Pointer(t.stack.prev)).next = t.stack.page
	}

	return r
}

// stackFrameKeepalive is the number of stack pages kept alive before being
// freed.
const stackFrameKeepalive = 2

// Free deallocates n bytes of thread-local storage. See TLS.Alloc for details
// on correct usage.
func (t *TLS) Free(n int) {
	if memgrind {
		if atomic.SwapInt32(&t.reentryGuard, 1) != 0 {
			panic(todo("concurrent use of TLS instance %p", t))
		}

		defer func() {
			if atomic.SwapInt32(&t.reentryGuard, 0) != 1 {
				panic(todo("concurrent use of TLS instance %p", t))
			}
		}()
	}
	n += 15
	n &^= 15
	t.stack.free += n
	t.stack.sp -= uintptr(n)
	if t.stack.sp != t.stack.page+stackHeaderSize {
		return
	}

	nstack := t.stack

	// If this is the first page, just free the whole chain.
	if t.stack.prev == 0 {
		for ; ; nstack = *(*stackHeader)(unsafe.Pointer(nstack.next)) {
			if memgrind {
				if atomic.AddInt32(&t.stackHeaderBalance, -1) < 0 {
					panic(todo("negative stack header balance"))
				}
			}
			Xfree(t, nstack.page)
			if nstack.next == 0 {
				break
			}
		}
		t.stack = stackHeader{}
		return
	}

	// Check whether we are within the last stackFrameKeepalive pages; if so,
	// keep them and just make the previous page current.
	for i := 0; i < stackFrameKeepalive; i++ {
		if nstack.next == 0 {
			*((*stackHeader)(unsafe.Pointer(t.stack.page))) = t.stack
			t.stack = *(*stackHeader)(unsafe.Pointer(t.stack.prev))
			return
		}
		nstack = *(*stackHeader)(unsafe.Pointer(nstack.next))
	}

	// Otherwise free only the last page.
	if memgrind {
		if atomic.AddInt32(&t.stackHeaderBalance, -1) < 0 {
			panic(todo("negative stack header balance"))
		}
	}
	Xfree(t, nstack.page)
	(*stackHeader)(unsafe.Pointer(nstack.prev)).next = 0
	*(*stackHeader)(unsafe.Pointer(t.stack.page)) = t.stack
	t.stack = *(*stackHeader)(unsafe.Pointer(t.stack.prev))
}
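
// A minimal usage sketch (editor's illustration, not part of the original
// file): Alloc/Free pairs must nest in LIFO order with matching sizes, as
// documented on Alloc.
//
//	tls := NewTLS() // assuming this package's TLS constructor
//	p := tls.Alloc(64)
//	q := tls.Alloc(32)
//	// ... use p and q as scratch memory ...
//	tls.Free(32) // release q first: reverse order, same sizes
//	tls.Free(64)
//	tls.Close()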

type stackHeader struct {
	free int     // bytes left in page
	page uintptr // stack page
	prev uintptr // prev stack page = prev stack header
	next uintptr // next stack page = next stack header
	sp   uintptr // next allocation address
	_    stackHeaderPadding
}

func cString(t *TLS, s string) uintptr { //TODO-
	n := len(s)
	p := Xmalloc(t, types.Size_t(n)+1)
	if p == 0 {
		panic("OOM")
	}

	copy((*RawMem)(unsafe.Pointer(p))[:n:n], s)
	*(*byte)(unsafe.Pointer(p + uintptr(n))) = 0
	return p
}

// VaList fills a varargs list at p with args and returns p. The list must
// have been allocated by the caller and it must not be in Go managed memory,
// i.e. it must be pinned. The caller is responsible for freeing the list.
//
// Individual arguments must be one of int, int32, int64, uint, uint16,
// uint32, uint64, float64 or uintptr. Other types panic.
//
// This function supports code generated by ccgo/v3. For manually constructed
// var args it's recommended to use the NewVaList function instead.
//
// Note: The C translated to Go varargs ABI alignment for all types is 8 on
// all architectures.
func VaList(p uintptr, args ...interface{}) (r uintptr) {
	if p&7 != 0 {
		panic("internal error")
	}

	r = p
	for _, v := range args {
		switch x := v.(type) {
		case int:
			*(*int64)(unsafe.Pointer(p)) = int64(x)
		case int32:
			*(*int64)(unsafe.Pointer(p)) = int64(x)
		case int64:
			*(*int64)(unsafe.Pointer(p)) = x
		case uint:
			*(*uint64)(unsafe.Pointer(p)) = uint64(x)
		case uint16:
			*(*uint64)(unsafe.Pointer(p)) = uint64(x)
		case uint32:
			*(*uint64)(unsafe.Pointer(p)) = uint64(x)
		case uint64:
			*(*uint64)(unsafe.Pointer(p)) = x
		case float64:
			*(*float64)(unsafe.Pointer(p)) = x
		case uintptr:
			*(*uintptr)(unsafe.Pointer(p)) = x
		default:
			panic(todo("invalid VaList argument type: %T", x))
		}
		p += 8
	}
	return r
}

// NewVaListN returns a newly allocated va_list for n items. The caller of
// NewVaListN is responsible for freeing the va_list.
func NewVaListN(n int) (va_list uintptr) {
	return Xmalloc(nil, types.Size_t(8*n))
}

// NewVaList is like VaList but automatically allocates the correct amount of
// memory for all of the items in args.
//
// The va_list return value is used to pass the constructed var args to var
// args accepting functions. The caller of NewVaList is responsible for
// freeing the va_list.
func NewVaList(args ...interface{}) (va_list uintptr) {
	return VaList(NewVaListN(len(args)), args...)
}

// VaInt32 reads the next variadic argument as an int32 from the va_list at
// *app and advances *app past it. A zero va_list yields 0.
func VaInt32(app *uintptr) int32 {
	ap := *(*uintptr)(unsafe.Pointer(app))
	if ap == 0 {
		return 0
	}

	ap = roundup(ap, 8)
	v := int32(*(*int64)(unsafe.Pointer(ap)))
	ap += 8
	*(*uintptr)(unsafe.Pointer(app)) = ap
	return v
}

// VaUint32 reads the next variadic argument as a uint32 from the va_list at
// *app and advances *app past it.
func VaUint32(app *uintptr) uint32 {
	ap := *(*uintptr)(unsafe.Pointer(app))
	if ap == 0 {
		return 0
	}

	ap = roundup(ap, 8)
	v := uint32(*(*uint64)(unsafe.Pointer(ap)))
	ap += 8
	*(*uintptr)(unsafe.Pointer(app)) = ap
	return v
}

// VaInt64 reads the next variadic argument as an int64 from the va_list at
// *app and advances *app past it.
func VaInt64(app *uintptr) int64 {
	ap := *(*uintptr)(unsafe.Pointer(app))
	if ap == 0 {
		return 0
	}

	ap = roundup(ap, 8)
	v := *(*int64)(unsafe.Pointer(ap))
	ap += 8
	*(*uintptr)(unsafe.Pointer(app)) = ap
	return v
}

// VaUint64 reads the next variadic argument as a uint64 from the va_list at
// *app and advances *app past it.
func VaUint64(app *uintptr) uint64 {
	ap := *(*uintptr)(unsafe.Pointer(app))
	if ap == 0 {
		return 0
	}

	ap = roundup(ap, 8)
	v := *(*uint64)(unsafe.Pointer(ap))
	ap += 8
	*(*uintptr)(unsafe.Pointer(app)) = ap
	return v
}

// VaFloat32 reads the next variadic argument, stored as a float64, from the
// va_list at *app, advances *app past it and returns it as a float32.
func VaFloat32(app *uintptr) float32 {
	ap := *(*uintptr)(unsafe.Pointer(app))
	if ap == 0 {
		return 0
	}

	ap = roundup(ap, 8)
	v := *(*float64)(unsafe.Pointer(ap))
	ap += 8
	*(*uintptr)(unsafe.Pointer(app)) = ap
	return float32(v)
}

// VaFloat64 reads the next variadic argument as a float64 from the va_list
// at *app and advances *app past it.
func VaFloat64(app *uintptr) float64 {
	ap := *(*uintptr)(unsafe.Pointer(app))
	if ap == 0 {
		return 0
	}

	ap = roundup(ap, 8)
	v := *(*float64)(unsafe.Pointer(ap))
	ap += 8
	*(*uintptr)(unsafe.Pointer(app)) = ap
	return v
}

// VaUintptr reads the next variadic argument as a uintptr from the va_list
// at *app and advances *app past it.
func VaUintptr(app *uintptr) uintptr {
	ap := *(*uintptr)(unsafe.Pointer(app))
	if ap == 0 {
		return 0
	}

	ap = roundup(ap, 8)
	v := *(*uintptr)(unsafe.Pointer(ap))
	ap += 8
	*(*uintptr)(unsafe.Pointer(app)) = ap
	return v
}
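
// A minimal usage sketch (editor's illustration, not part of the original
// file): build a va_list with NewVaList, read it back with the Va* helpers
// (each advances its argument by one 8-byte slot), then free it.
//
//	ap := NewVaList(int32(42), 3.5, uint64(7))
//	p := ap            // keep ap itself for freeing
//	i := VaInt32(&p)   // 42
//	f := VaFloat64(&p) // 3.5
//	u := VaUint64(&p)  // 7
//	Xfree(tls, ap)     // tls is a *TLS owned by the caller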

// roundup rounds n up to the next multiple of to, e.g. roundup(13, 8) == 16.
func roundup(n, to uintptr) uintptr {
	if r := n % to; r != 0 {
		return n + to - r
	}

	return n
}

// GoString returns a Go string from the zero-terminated C string at s. A
// zero s yields "".
func GoString(s uintptr) string {
	if s == 0 {
		return ""
	}

	var buf []byte
	for {
		b := *(*byte)(unsafe.Pointer(s))
		if b == 0 {
			return string(buf)
		}

		buf = append(buf, b)
		s++
	}
}

// GoBytes returns a byte slice from a C char* having length len bytes.
func GoBytes(s uintptr, len int) []byte {
	if len == 0 {
		return nil
	}

	return (*RawMem)(unsafe.Pointer(s))[:len:len]
}

// Bool32 returns 1 if b is true and 0 otherwise.
func Bool32(b bool) int32 {
	if b {
		return 1
	}

	return 0
}

// Bool64 returns 1 if b is true and 0 otherwise.
func Bool64(b bool) int64 {
	if b {
		return 1
	}

	return 0
}

// sorter adapts a C qsort-style comparator f to sort.Interface over a raw
// memory array of len elements of size sz starting at base.
type sorter struct {
	len  int
	base uintptr
	sz   uintptr
	f    func(*TLS, uintptr, uintptr) int32
	t    *TLS
}

func (s *sorter) Len() int { return s.len }

func (s *sorter) Less(i, j int) bool {
	return s.f(s.t, s.base+uintptr(i)*s.sz, s.base+uintptr(j)*s.sz) < 0
}

func (s *sorter) Swap(i, j int) {
	p := uintptr(s.base + uintptr(i)*s.sz)
	q := uintptr(s.base + uintptr(j)*s.sz)
	for i := 0; i < int(s.sz); i++ {
		*(*byte)(unsafe.Pointer(p)), *(*byte)(unsafe.Pointer(q)) = *(*byte)(unsafe.Pointer(q)), *(*byte)(unsafe.Pointer(p))
		p++
		q++
	}
}
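
// exampleSort is a minimal sketch (editor's illustration, not part of the
// original file) of how sorter plugs a C-style comparator into sort.Sort;
// the function name, comparator and data are hypothetical.
func exampleSort(t *TLS) {
	const n, sz = 3, unsafe.Sizeof(int32(0))
	base := Xmalloc(t, types.Size_t(n*sz))
	defer Xfree(t, base)
	// Fill the raw array with unsorted values.
	for i, v := range []int32{3, 1, 2} {
		*(*int32)(unsafe.Pointer(base + uintptr(i)*sz)) = v
	}
	// A qsort-style comparator over pointers into the raw array.
	cmp := func(_ *TLS, a, b uintptr) int32 {
		return *(*int32)(unsafe.Pointer(a)) - *(*int32)(unsafe.Pointer(b))
	}
	sort.Sort(&sorter{len: n, base: base, sz: sz, f: cmp, t: t})
	// base now holds 1, 2, 3.
}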

// CString allocates a zero-terminated C string initialized with s. The
// caller is responsible for freeing it.
func CString(s string) (uintptr, error) {
	n := len(s)
	p := Xmalloc(nil, types.Size_t(n)+1)
	if p == 0 {
		return 0, fmt.Errorf("CString: cannot allocate %d bytes", n+1)
	}

	copy((*RawMem)(unsafe.Pointer(p))[:n:n], s)
	*(*byte)(unsafe.Pointer(p + uintptr(n))) = 0
	return p, nil
}
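
// A minimal round-trip sketch (editor's illustration, not part of the
// original file):
//
//	p, err := CString("hello")
//	if err != nil { /* allocation failed */ }
//	s := GoString(p) // "hello"
//	Xfree(tls, p)    // tls is a *TLS owned by the caller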

// GetEnviron returns the process environment as a Go string slice, read from
// the zero-terminated C environ array at Environ().
func GetEnviron() (r []string) {
	for p := Environ(); ; p += unsafe.Sizeof(p) {
		q := *(*uintptr)(unsafe.Pointer(p))
		if q == 0 {
			return r
		}

		r = append(r, GoString(q))
	}
}

// strToUint64 parses an unsigned integer in the given base (10 or 16) from
// the C string at s, skipping leading white space and an optional sign. It
// reports whether any digit was seen, whether a '-' sign was consumed, the
// position after the parsed prefix, the value, and errno.ERANGE on overflow.
func strToUint64(t *TLS, s uintptr, base int32) (seenDigits, neg bool, next uintptr, n uint64, err int32) {
	var c byte
out:
	for {
		c = *(*byte)(unsafe.Pointer(s))
		switch c {
		case ' ', '\t', '\n', '\r', '\v', '\f':
			s++
		case '+':
			s++
			break out
		case '-':
			s++
			neg = true
			break out
		default:
			break out
		}
	}
	for {
		c = *(*byte)(unsafe.Pointer(s))
		var digit uint64
		switch base {
		case 10:
			switch {
			case c >= '0' && c <= '9':
				seenDigits = true
				digit = uint64(c) - '0'
			default:
				return seenDigits, neg, s, n, 0
			}
		case 16:
			if c >= 'A' && c <= 'F' {
				c = c + ('a' - 'A')
			}
			switch {
			case c >= '0' && c <= '9':
				seenDigits = true
				digit = uint64(c) - '0'
			case c >= 'a' && c <= 'f':
				seenDigits = true
				digit = uint64(c) - 'a' + 10
			default:
				return seenDigits, neg, s, n, 0
			}
		default:
			panic(todo("", base))
		}
		n0 := n
		n = uint64(base)*n + digit
		if n < n0 { // overflow
			return seenDigits, neg, s, n0, errno.ERANGE
		}

		s++
	}
}
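
// For example (editor's illustration): with base 10 and s pointing at the C
// string "  -42x", strToUint64 skips the blanks, consumes the sign, and
// returns seenDigits=true, neg=true, n=42, with next pointing at 'x' and
// err=0.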

// strToFloatt64 parses a floating point number from the C string at s,
// skipping leading white space and an optional sign, and returns its value
// parsed at the given bit precision (32 or 64).
func strToFloatt64(t *TLS, s uintptr, bits int) (n float64, errno int32) {
	var b []byte
	var neg bool

	defer func() {
		var err error
		if n, err = strconv.ParseFloat(string(b), bits); err != nil {
			panic(todo(""))
		}

		if neg {
			n = -n
		}
	}()

	var c byte
out:
	for {
		c = *(*byte)(unsafe.Pointer(s))
		switch c {
		case ' ', '\t', '\n', '\r', '\v', '\f':
			s++
		case '+':
			s++
			break out
		case '-':
			s++
			neg = true
			break out
		default:
			break out
		}
	}
	for {
		c = *(*byte)(unsafe.Pointer(s))
		switch {
		case c >= '0' && c <= '9':
			b = append(b, c)
		case c == '.':
			b = append(b, c)
			s++
			for {
				c = *(*byte)(unsafe.Pointer(s))
				switch {
				case c >= '0' && c <= '9':
					b = append(b, c)
				case c == 'e' || c == 'E':
					b = append(b, c)
					s++
					for {
						c = *(*byte)(unsafe.Pointer(s))
						switch {
						case c == '+' || c == '-':
							b = append(b, c)
							s++
							for {
								c = *(*byte)(unsafe.Pointer(s))
								switch {
								case c >= '0' && c <= '9':
									b = append(b, c)
								default:
									return
								}

								s++
							}
						default:
							panic(todo("%q %q", b, string(c)))
						}
					}
				default:
					return
				}

				s++
			}
		default:
			panic(todo("%q %q", b, string(c)))
		}

		s++
	}
}

// parseZone parses a leading zone name followed by an offset, returning the
// name and the offset in seconds.
func parseZone(s string) (name string, off int) {
	_, name, off, _ = parseZoneOffset(s, false)
	return name, off
}

func parseZoneOffset(s string, offOpt bool) (string, string, int, bool) {
	s0 := s
	name := s
	for len(s) != 0 {
		switch c := s[0]; {
		case c >= 'A' && c <= 'Z', c >= 'a' && c <= 'z', c == '_', c == '/':
			s = s[1:]
		default:
			name = name[:len(name)-len(s)]
			if len(name) < 3 {
				panic(todo("%q", s0))
			}

			if offOpt {
				if len(s) == 0 {
					return "", name, 0, false
				}

				if c := s[0]; (c < '0' || c > '9') && c != '+' && c != '-' {
					return s, name, 0, false
				}
			}

			s, off := parseOffset(s)
			return s, name, off, true
		}
	}
	return "", s0, 0, true
}

// parseOffset parses an offset of the form [+|-]hh[:mm[:ss]] and returns the
// remainder of s and the offset in seconds.
func parseOffset(s string) (string, int) {
	if len(s) == 0 {
		panic(todo(""))
	}

	k := 1
	switch s[0] {
	case '+':
		// nop
		s = s[1:]
	case '-':
		k = -1
		s = s[1:]
	}
	s, hh, ok := parseUint(s)
	if !ok {
		panic(todo(""))
	}

	n := hh * 3600
	if len(s) == 0 || s[0] != ':' {
		return s, k * n
	}

	s = s[1:] // ':'
	if len(s) == 0 {
		panic(todo(""))
	}

	s, mm, ok := parseUint(s)
	if !ok {
		panic(todo(""))
	}

	n += mm * 60
	if len(s) == 0 || s[0] != ':' {
		return s, k * n
	}

	s = s[1:] // ':'
	if len(s) == 0 {
		panic(todo(""))
	}

	s, ss, _ := parseUint(s)
	return s, k * (n + ss)
}
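
// For example (editor's illustration): parseOffset("-05:30") consumes the
// whole string and returns ("", -19800), i.e. -(5*3600 + 30*60) seconds.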

// parseUint parses a leading run of decimal digits and returns the remainder
// of s, the value, and whether any digit was seen.
func parseUint(s string) (string, int, bool) {
	var ok bool
	var r int
	for len(s) != 0 {
		switch c := s[0]; {
		case c >= '0' && c <= '9':
			ok = true
			r0 := r
			r = 10*r + int(c) - '0'
			if r < r0 {
				panic(todo(""))
			}

			s = s[1:]
		default:
			return s, r, ok
		}
	}
	return s, r, ok
}

// https://stackoverflow.com/a/53052382
//
// isTimeDST returns true if time t occurs within daylight saving time
// for its time zone.
func isTimeDST(t time.Time) bool {
	// If the most recent (within the last year) clock change
	// was forward then assume the change was from std to dst.
	hh, mm, _ := t.UTC().Clock()
	tClock := hh*60 + mm
	for m := -1; m > -12; m-- {
		// assume dst lasts for at least one month
		hh, mm, _ := t.AddDate(0, m, 0).UTC().Clock()
		clock := hh*60 + mm
		if clock != tClock {
			return clock > tClock
		}
	}
	// assume no dst
	return false
}
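
// For example (editor's illustration): with t at 12:00 local in a UTC+2
// summer zone, t.UTC() reads 10:00 (tClock=600); the same wall time in a
// UTC+1 winter month reads 11:00 (clock=660), so clock > tClock and the
// function reports DST.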