source: code/trunk/vendor/modernc.org/libc/libc.go@ 823

Last change on this file since 823 was 822, checked in by yakumo.izuru, 22 months ago

Prefer immortal.run over runit and rc.d, use vendored modules
for convenience.

Signed-off-by: Izuru Yakumo <yakumo.izuru@…>

File size: 39.1 KB
1// Copyright 2020 The Libc Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5//go.generate echo package libc > ccgo.go
6//go:generate go run generate.go
7//go:generate go fmt ./...
8
9// Package libc provides run time support for ccgo generated programs and
10// implements selected parts of the C standard library.
11package libc // import "modernc.org/libc"
12
13//TODO use O_RDONLY etc. from fcntl header
14
15//TODO use t.Alloc/Free where appropriate
16
17import (
18 "bufio"
19 crand "crypto/rand"
20 "fmt"
21 "math"
22 mbits "math/bits"
23 "math/rand"
24 "os"
25 "runtime"
26 "runtime/debug"
27 "sort"
28 "strings"
29 "sync"
30 "sync/atomic"
31 gotime "time"
32 "unsafe"
33
34 "github.com/mattn/go-isatty"
35 "modernc.org/libc/errno"
36 "modernc.org/libc/stdio"
37 "modernc.org/libc/sys/types"
38 "modernc.org/libc/time"
39 "modernc.org/libc/unistd"
40 "modernc.org/mathutil"
41)
42
43type (
44 // RawMem64 represents the biggest uint64 array the runtime can handle.
45 RawMem64 [unsafe.Sizeof(RawMem{}) / unsafe.Sizeof(uint64(0))]uint64
46)
47
48var (
49 allocMu sync.Mutex
50 environInitialized bool
51 isWindows bool
52)
53
54// Keep these outside of the var block; otherwise go generate will miss them.
55var Xenviron uintptr
56var Xstdin = newFile(nil, unistd.STDIN_FILENO)
57var Xstdout = newFile(nil, unistd.STDOUT_FILENO)
58var Xstderr = newFile(nil, unistd.STDERR_FILENO)
59
60func setEnviron() {
61 SetEnviron(nil, os.Environ())
62}
63
64func Environ() uintptr {
65 if !environInitialized {
66 SetEnviron(nil, os.Environ())
67 }
68 return Xenviron
69}
70
71func EnvironP() uintptr {
72 if !environInitialized {
73 SetEnviron(nil, os.Environ())
74 }
75 return uintptr(unsafe.Pointer(&Xenviron))
76}
77
78func X___errno_location(t *TLS) uintptr {
79 return X__errno_location(t)
80}
81
82// int * __errno_location(void);
83func X__errno_location(t *TLS) uintptr {
84 return t.errnop
85}
86
87func Start(main func(*TLS, int32, uintptr) int32) {
88 if dmesgs {
89 wd, err := os.Getwd()
90 dmesg("%v: %v, wd %v, %v", origin(1), os.Args, wd, err)
91
92 defer func() {
93 if err := recover(); err != nil {
94 dmesg("%v: CRASH: %v\n%s", origin(1), err, debug.Stack())
95 }
96 }()
97 }
98 runtime.LockOSThread()
99 t := &TLS{errnop: uintptr(unsafe.Pointer(&errno0))}
100 argv := Xcalloc(t, 1, types.Size_t((len(os.Args)+1)*int(uintptrSize)))
101 if argv == 0 {
102 panic("OOM")
103 }
104
105 p := argv
106 for _, v := range os.Args {
107 s := Xcalloc(t, 1, types.Size_t(len(v)+1))
108 if s == 0 {
109 panic("OOM")
110 }
111
112 copy((*RawMem)(unsafe.Pointer(s))[:len(v):len(v)], v)
113 *(*uintptr)(unsafe.Pointer(p)) = s
114 p += uintptrSize
115 }
116 SetEnviron(t, os.Environ())
117 audit := false
118 if memgrind {
119 if s := os.Getenv("LIBC_MEMGRIND_START"); s != "0" {
120 MemAuditStart()
121 audit = true
122 }
123 }
124 t = NewTLS()
125 rc := main(t, int32(len(os.Args)), argv)
126 exit(t, rc, audit)
127}
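// A minimal sketch of how a ccgo-translated program is expected to reach
// Start; the Xmain name and body below are illustrative and the generated
// code may differ:
//
//	func main() { libc.Start(Xmain) }
//
//	func Xmain(tls *libc.TLS, argc int32, argv uintptr) int32 {
//		return 0 // translated C main body
//	}
//
// Start builds a zero-terminated argv vector of NUL-terminated strings,
// initializes Xenviron, optionally enables memory auditing, and only then
// hands a fresh TLS to main.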
128
129func Xexit(t *TLS, status int32) { exit(t, status, false) }
130
131func exit(t *TLS, status int32, audit bool) {
132 if len(Covered) != 0 {
133 buf := bufio.NewWriter(os.Stdout)
134 CoverReport(buf)
135 buf.Flush()
136 }
137 if len(CoveredC) != 0 {
138 buf := bufio.NewWriter(os.Stdout)
139 CoverCReport(buf)
140 buf.Flush()
141 }
142 for _, v := range atExit {
143 v()
144 }
145 if audit {
146 t.Close()
147 if tlsBalance != 0 {
148 fmt.Fprintf(os.Stderr, "non zero TLS balance: %d\n", tlsBalance)
149 status = 1
150 }
151 }
152 X_exit(nil, status)
153}
154
155// void _exit(int status);
156func X_exit(_ *TLS, status int32) {
157 if dmesgs {
158 dmesg("%v: EXIT %v", origin(1), status)
159 }
160 os.Exit(int(status))
161}
162
163func SetEnviron(t *TLS, env []string) {
164 if environInitialized {
165 return
166 }
167
168 environInitialized = true
169 p := Xcalloc(t, 1, types.Size_t((len(env)+1)*(int(uintptrSize))))
170 if p == 0 {
171 panic("OOM")
172 }
173
174 Xenviron = p
175 for _, v := range env {
176 s := Xcalloc(t, 1, types.Size_t(len(v)+1))
177 if s == 0 {
178 panic("OOM")
179 }
180
181 copy((*(*RawMem)(unsafe.Pointer(s)))[:len(v):len(v)], v)
182 *(*uintptr)(unsafe.Pointer(p)) = s
183 p += uintptrSize
184 }
185}
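// The vector built above follows the C environ convention: a zero-terminated
// array of pointers, each pointing to a NUL-terminated "KEY=VALUE" string,
// conceptually
//
//	Xenviron -> ["HOME=/root\0", "PATH=/bin\0", ..., 0]
//
// The trailing 0 slot exists because Xcalloc allocates len(env)+1 zeroed
// pointer-sized entries while the loop fills only the first len(env).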
186
187// void setbuf(FILE *stream, char *buf);
188func Xsetbuf(t *TLS, stream, buf uintptr) {
189 //TODO panic(todo(""))
190}
191
192// size_t confstr(int name, char *buf, size_t len);
193func Xconfstr(t *TLS, name int32, buf uintptr, len types.Size_t) types.Size_t {
194 panic(todo(""))
195}
196
197// int puts(const char *s);
198func Xputs(t *TLS, s uintptr) int32 {
199 n, err := fmt.Printf("%s\n", GoString(s))
200 if err != nil {
201 return stdio.EOF
202 }
203
204 return int32(n)
205}
206
207var (
208 randomMu sync.Mutex
209 randomGen = rand.New(rand.NewSource(42))
210)
211
212// long int random(void);
213func Xrandom(t *TLS) long {
214 randomMu.Lock()
215 r := randomGen.Int63n(math.MaxInt32 + 1)
216 randomMu.Unlock()
217 return long(r)
218}
219
220func write(b []byte) (int, error) {
221 // if dmesgs {
222 // dmesg("%v: %s", origin(1), b)
223 // }
224 if _, err := os.Stdout.Write(b); err != nil {
225 return -1, err
226 }
227
228 return len(b), nil
229}
230
231func X__builtin_bzero(t *TLS, s uintptr, n types.Size_t) { Xbzero(t, s, n) }
232func X__builtin_abort(t *TLS) { Xabort(t) }
233func X__builtin_abs(t *TLS, j int32) int32 { return Xabs(t, j) }
234func X__builtin_clz(t *TLS, n uint32) int32 { return int32(mbits.LeadingZeros32(n)) }
235func X__builtin_clzl(t *TLS, n ulong) int32 { return int32(mbits.LeadingZeros64(uint64(n))) }
236func X__builtin_clzll(t *TLS, n uint64) int32 { return int32(mbits.LeadingZeros64(n)) }
237func X__builtin_constant_p_impl() { panic(todo("internal error: should never be called")) }
238func X__builtin_copysign(t *TLS, x, y float64) float64 { return Xcopysign(t, x, y) }
239func X__builtin_copysignf(t *TLS, x, y float32) float32 { return Xcopysignf(t, x, y) }
240func X__builtin_copysignl(t *TLS, x, y float64) float64 { return Xcopysign(t, x, y) }
241func X__builtin_exit(t *TLS, status int32) { Xexit(t, status) }
242func X__builtin_expect(t *TLS, exp, c long) long { return exp }
243func X__builtin_fabs(t *TLS, x float64) float64 { return Xfabs(t, x) }
244func X__builtin_fabsf(t *TLS, x float32) float32 { return Xfabsf(t, x) }
245func X__builtin_fabsl(t *TLS, x float64) float64 { return Xfabsl(t, x) }
246func X__builtin_free(t *TLS, ptr uintptr) { Xfree(t, ptr) }
247func X__builtin_getentropy(t *TLS, buf uintptr, n types.Size_t) int32 { return Xgetentropy(t, buf, n) }
248func X__builtin_huge_val(t *TLS) float64 { return math.Inf(1) }
249func X__builtin_huge_valf(t *TLS) float32 { return float32(math.Inf(1)) }
250func X__builtin_inf(t *TLS) float64 { return math.Inf(1) }
251func X__builtin_inff(t *TLS) float32 { return float32(math.Inf(1)) }
252func X__builtin_infl(t *TLS) float64 { return math.Inf(1) }
253func X__builtin_malloc(t *TLS, size types.Size_t) uintptr { return Xmalloc(t, size) }
254func X__builtin_memcmp(t *TLS, s1, s2 uintptr, n types.Size_t) int32 { return Xmemcmp(t, s1, s2, n) }
255func X__builtin_nan(t *TLS, s uintptr) float64 { return math.NaN() }
256func X__builtin_nanf(t *TLS, s uintptr) float32 { return float32(math.NaN()) }
257func X__builtin_nanl(t *TLS, s uintptr) float64 { return math.NaN() }
258func X__builtin_prefetch(t *TLS, addr, args uintptr) {}
259func X__builtin_printf(t *TLS, s, args uintptr) int32 { return Xprintf(t, s, args) }
260func X__builtin_strchr(t *TLS, s uintptr, c int32) uintptr { return Xstrchr(t, s, c) }
261func X__builtin_strcmp(t *TLS, s1, s2 uintptr) int32 { return Xstrcmp(t, s1, s2) }
262func X__builtin_strcpy(t *TLS, dest, src uintptr) uintptr { return Xstrcpy(t, dest, src) }
263func X__builtin_strlen(t *TLS, s uintptr) types.Size_t { return Xstrlen(t, s) }
264func X__builtin_trap(t *TLS) { Xabort(t) }
265func X__isnan(t *TLS, arg float64) int32 { return X__builtin_isnan(t, arg) }
266func X__isnanf(t *TLS, arg float32) int32 { return Xisnanf(t, arg) }
267func X__isnanl(t *TLS, arg float64) int32 { return Xisnanl(t, arg) }
268
269func Xvfprintf(t *TLS, stream, format, ap uintptr) int32 { return Xfprintf(t, stream, format, ap) }
270
271// int __builtin_popcount (unsigned int x)
272func X__builtin_popcount(t *TLS, x uint32) int32 {
273 return int32(mbits.OnesCount32(x))
274}
275
276// int __builtin_popcountl (unsigned long x)
277func X__builtin_popcountl(t *TLS, x ulong) int32 {
278 return int32(mbits.OnesCount64(uint64(x)))
279}
280
281// char * __builtin___strcpy_chk (char *dest, const char *src, size_t os);
282func X__builtin___strcpy_chk(t *TLS, dest, src uintptr, os types.Size_t) uintptr {
283 return Xstrcpy(t, dest, src)
284}
285
286func X__builtin_mmap(t *TLS, addr uintptr, length types.Size_t, prot, flags, fd int32, offset types.Off_t) uintptr {
287 return Xmmap(t, addr, length, prot, flags, fd, offset)
288}
289
290// uint16_t __builtin_bswap16 (uint32_t x)
291func X__builtin_bswap16(t *TLS, x uint16) uint16 {
292 return x<<8 |
293 x>>8
294}
295
296// uint32_t __builtin_bswap32 (uint32_t x)
297func X__builtin_bswap32(t *TLS, x uint32) uint32 {
298 return x<<24 |
299 x&0xff00<<8 |
300 x&0xff0000>>8 |
301 x>>24
302}
303
304// uint64_t __builtin_bswap64 (uint64_t x)
305func X__builtin_bswap64(t *TLS, x uint64) uint64 {
306 return x<<56 |
307 x&0xff00<<40 |
308 x&0xff0000<<24 |
309 x&0xff000000<<8 |
310 x&0xff00000000>>8 |
311 x&0xff0000000000>>24 |
312 x&0xff000000000000>>40 |
313 x>>56
314}
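// These mirror gcc's byte-swap builtins: every byte is masked out and moved
// to its mirrored position, so X__builtin_bswap32(0x11223344) yields
// 0x44332211 and X__builtin_bswap16(0x1234) yields 0x3412, which is how
// values are converted between little- and big-endian representations.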
315
316// bool __builtin_add_overflow (type1 a, type2 b, type3 *res)
317func X__builtin_add_overflowInt64(t *TLS, a, b int64, res uintptr) int32 {
318 r, ovf := mathutil.AddOverflowInt64(a, b)
319 *(*int64)(unsafe.Pointer(res)) = r
320 return Bool32(ovf)
321}
322
323// bool __builtin_add_overflow (type1 a, type2 b, type3 *res)
324func X__builtin_add_overflowUint32(t *TLS, a, b uint32, res uintptr) int32 {
325 r := a + b
326 *(*uint32)(unsafe.Pointer(res)) = r
327 return Bool32(r < a)
328}
329
330// bool __builtin_add_overflow (type1 a, type2 b, type3 *res)
331func X__builtin_add_overflowUint64(t *TLS, a, b uint64, res uintptr) int32 {
332 r := a + b
333 *(*uint64)(unsafe.Pointer(res)) = r
334 return Bool32(r < a)
335}
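// The unsigned variants rely on modular arithmetic: an unsigned addition
// overflows exactly when the wrapped sum is smaller than either operand, so
// Bool32(r < a) reports the carry. For example, with uint32 operands
// 0xFFFFFFFF + 2 wraps to 1, and 1 < 0xFFFFFFFF flags the overflow while the
// wrapped result is still stored through res, as the gcc builtin specifies.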
336
337// bool __builtin_sub_overflow (type1 a, type2 b, type3 *res)
338func X__builtin_sub_overflowInt64(t *TLS, a, b int64, res uintptr) int32 {
339 r, ovf := mathutil.SubOverflowInt64(a, b)
340 *(*int64)(unsafe.Pointer(res)) = r
341 return Bool32(ovf)
342}
343
344// bool __builtin_mul_overflow (type1 a, type2 b, type3 *res)
345func X__builtin_mul_overflowInt64(t *TLS, a, b int64, res uintptr) int32 {
346 r, ovf := mathutil.MulOverflowInt64(a, b)
347 *(*int64)(unsafe.Pointer(res)) = r
348 return Bool32(ovf)
349}
350
351// bool __builtin_mul_overflow (type1 a, type2 b, type3 *res)
352func X__builtin_mul_overflowUint64(t *TLS, a, b uint64, res uintptr) int32 {
353 hi, lo := mbits.Mul64(a, b)
354 *(*uint64)(unsafe.Pointer(res)) = lo
355 return Bool32(hi != 0)
356}
357
358// bool __builtin_mul_overflow (type1 a, type2 b, type3 *res)
359func X__builtin_mul_overflowUint128(t *TLS, a, b Uint128, res uintptr) int32 {
360 r, ovf := a.mulOvf(b)
361 *(*Uint128)(unsafe.Pointer(res)) = r
362 return Bool32(ovf)
363}
364
365func X__builtin_unreachable(t *TLS) {
366 fmt.Fprintf(os.Stderr, "unreachable\n")
367 os.Stderr.Sync()
368 Xexit(t, 1)
369}
370
371func X__builtin_snprintf(t *TLS, str uintptr, size types.Size_t, format, args uintptr) int32 {
372 return Xsnprintf(t, str, size, format, args)
373}
374
375func X__builtin_sprintf(t *TLS, str, format, args uintptr) (r int32) {
376 return Xsprintf(t, str, format, args)
377}
378
379func X__builtin_memcpy(t *TLS, dest, src uintptr, n types.Size_t) (r uintptr) {
380 return Xmemcpy(t, dest, src, n)
381}
382
383// void * __builtin___memcpy_chk (void *dest, const void *src, size_t n, size_t os);
384func X__builtin___memcpy_chk(t *TLS, dest, src uintptr, n, os types.Size_t) (r uintptr) {
385 if os != ^types.Size_t(0) && n < os {
386 Xabort(t)
387 }
388
389 return Xmemcpy(t, dest, src, n)
390}
391
392func X__builtin_memset(t *TLS, s uintptr, c int32, n types.Size_t) uintptr {
393 return Xmemset(t, s, c, n)
394}
395
396// void * __builtin___memset_chk (void *s, int c, size_t n, size_t os);
397func X__builtin___memset_chk(t *TLS, s uintptr, c int32, n, os types.Size_t) uintptr {
398 if os < n {
399 Xabort(t)
400 }
401
402 return Xmemset(t, s, c, n)
403}
404
405// size_t __builtin_object_size (const void * ptr, int type)
406func X__builtin_object_size(t *TLS, p uintptr, typ int32) types.Size_t {
407 return ^types.Size_t(0) //TODO frontend magic
408}
409
410var atomicLoadStore16 sync.Mutex
411
412func AtomicLoadNUint16(ptr uintptr, memorder int32) uint16 {
413 atomicLoadStore16.Lock()
414 r := *(*uint16)(unsafe.Pointer(ptr))
415 atomicLoadStore16.Unlock()
416 return r
417}
418
419func AtomicStoreNUint16(ptr uintptr, val uint16, memorder int32) {
420 atomicLoadStore16.Lock()
421 *(*uint16)(unsafe.Pointer(ptr)) = val
422 atomicLoadStore16.Unlock()
423}
424
425// int sprintf(char *str, const char *format, ...);
426func Xsprintf(t *TLS, str, format, args uintptr) (r int32) {
427 b := printf(format, args)
428 r = int32(len(b))
429 copy((*RawMem)(unsafe.Pointer(str))[:r:r], b)
430 *(*byte)(unsafe.Pointer(str + uintptr(r))) = 0
431 return int32(len(b))
432}
433
434// int __builtin___sprintf_chk (char *s, int flag, size_t os, const char *fmt, ...);
435func X__builtin___sprintf_chk(t *TLS, s uintptr, flag int32, os types.Size_t, format, args uintptr) (r int32) {
436 return Xsprintf(t, s, format, args)
437}
438
439// void qsort(void *base, size_t nmemb, size_t size, int (*compar)(const void *, const void *));
440func Xqsort(t *TLS, base uintptr, nmemb, size types.Size_t, compar uintptr) {
441 sort.Sort(&sorter{
442 len: int(nmemb),
443 base: base,
444 sz: uintptr(size),
445 f: (*struct {
446 f func(*TLS, uintptr, uintptr) int32
447 })(unsafe.Pointer(&struct{ uintptr }{compar})).f,
448 t: t,
449 })
450}
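// The f field above recovers a Go function value from the C comparator that
// ccgo passes as a uintptr: a one-word struct holding the uintptr is
// reinterpreted through unsafe.Pointer as a struct holding a
// func(*TLS, uintptr, uintptr) int32, so sorter can invoke compar on pairs of
// element addresses exactly like qsort's compar(const void *, const void *).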
451
452// void __assert_fail(const char * assertion, const char * file, unsigned int line, const char * function);
453func X__assert_fail(t *TLS, assertion, file uintptr, line uint32, function uintptr) {
454 fmt.Fprintf(os.Stderr, "assertion failure: %s:%d.%s: %s\n", GoString(file), line, GoString(function), GoString(assertion))
455 if memgrind {
456 fmt.Fprintf(os.Stderr, "%s\n", debug.Stack())
457 }
458 os.Stderr.Sync()
459 Xexit(t, 1)
460}
461
462// int vprintf(const char *format, va_list ap);
463func Xvprintf(t *TLS, s, ap uintptr) int32 { return Xprintf(t, s, ap) }
464
465// int vsprintf(char *str, const char *format, va_list ap);
466func Xvsprintf(t *TLS, str, format, va uintptr) int32 {
467 return Xsprintf(t, str, format, va)
468}
469
470// int vsnprintf(char *str, size_t size, const char *format, va_list ap);
471func Xvsnprintf(t *TLS, str uintptr, size types.Size_t, format, va uintptr) int32 {
472 return Xsnprintf(t, str, size, format, va)
473}
474
475// int obstack_vprintf (struct obstack *obstack, const char *template, va_list ap)
476func Xobstack_vprintf(t *TLS, obstack, template, va uintptr) int32 {
477 panic(todo(""))
478}
479
480// extern void _obstack_newchunk(struct obstack *, int);
481func X_obstack_newchunk(t *TLS, obstack uintptr, length int32) int32 {
482 panic(todo(""))
483}
484
485// int _obstack_begin (struct obstack *h, _OBSTACK_SIZE_T size, _OBSTACK_SIZE_T alignment, void *(*chunkfun) (size_t), void (*freefun) (void *))
486func X_obstack_begin(t *TLS, obstack uintptr, size, alignment int32, chunkfun, freefun uintptr) int32 {
487 panic(todo(""))
488}
489
490// void obstack_free (struct obstack *h, void *obj)
491func Xobstack_free(t *TLS, obstack, obj uintptr) {
492 panic(todo(""))
493}
494
495// unsigned int sleep(unsigned int seconds);
496func Xsleep(t *TLS, seconds uint32) uint32 {
497 gotime.Sleep(gotime.Second * gotime.Duration(seconds))
498 return 0
499}
500
501// size_t strcspn(const char *s, const char *reject);
502func Xstrcspn(t *TLS, s, reject uintptr) (r types.Size_t) {
503 bits := newBits(256)
504 for {
505 c := *(*byte)(unsafe.Pointer(reject))
506 if c == 0 {
507 break
508 }
509
510 reject++
511 bits.set(int(c))
512 }
513 for {
514 c := *(*byte)(unsafe.Pointer(s))
515 if c == 0 || bits.has(int(c)) {
516 return r
517 }
518
519 s++
520 r++
521 }
522}
523
524// int printf(const char *format, ...);
525func Xprintf(t *TLS, format, args uintptr) int32 {
526 n, _ := write(printf(format, args))
527 return int32(n)
528}
529
530// int snprintf(char *str, size_t size, const char *format, ...);
531func Xsnprintf(t *TLS, str uintptr, size types.Size_t, format, args uintptr) (r int32) {
532 if format == 0 {
533 return 0
534 }
535
536 b := printf(format, args)
537 r = int32(len(b))
538 if size == 0 {
539 return r
540 }
541
542 if len(b)+1 > int(size) {
543 b = b[:size-1]
544 }
545 n := len(b)
546 copy((*RawMem)(unsafe.Pointer(str))[:n:n], b)
547 *(*byte)(unsafe.Pointer(str + uintptr(n))) = 0
548 return r
549}
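// As with C snprintf, the return value is the length the fully formatted
// string would have had, even when the output is truncated to size-1 bytes
// plus a terminating NUL. A hedged sketch, with buf and fmtPtr standing in
// for real allocations:
//
//	n := Xsnprintf(t, buf, 8, fmtPtr, args) // writes at most 7 bytes + NUL
//	truncated := int(n) >= 8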
550
551// int __builtin___snprintf_chk(char * str, size_t maxlen, int flag, size_t os, const char * format, ...);
552func X__builtin___snprintf_chk(t *TLS, str uintptr, maxlen types.Size_t, flag int32, os types.Size_t, format, args uintptr) (r int32) {
553 if os != ^types.Size_t(0) && maxlen > os {
554 Xabort(t)
555 }
556
557 return Xsnprintf(t, str, maxlen, format, args)
558}
559
560// int __builtin___vsnprintf_chk (char *s, size_t maxlen, int flag, size_t os, const char *fmt, va_list ap);
561func X__builtin___vsnprintf_chk(t *TLS, str uintptr, maxlen types.Size_t, flag int32, os types.Size_t, format, args uintptr) (r int32) {
562 if os != ^types.Size_t(0) && maxlen > os {
563 Xabort(t)
564 }
565
566 return Xsnprintf(t, str, maxlen, format, args)
567}
568
569// int abs(int j);
570func Xabs(t *TLS, j int32) int32 {
571 if j >= 0 {
572 return j
573 }
574
575 return -j
576}
577
578func Xllabs(tls *TLS, a int64) int64 {
579 if a >= int64(0) {
580 return a
581 }
582
583 return -a
584}
585
586func X__builtin_isnan(t *TLS, x float64) int32 { return Bool32(math.IsNaN(x)) }
587func X__builtin_llabs(tls *TLS, a int64) int64 { return Xllabs(tls, a) }
588func Xacos(t *TLS, x float64) float64 { return math.Acos(x) }
589func Xacosh(t *TLS, x float64) float64 { return math.Acosh(x) }
590func Xasin(t *TLS, x float64) float64 { return math.Asin(x) }
591func Xasinh(t *TLS, x float64) float64 { return math.Asinh(x) }
592func Xatan(t *TLS, x float64) float64 { return math.Atan(x) }
593func Xatan2(t *TLS, x, y float64) float64 { return math.Atan2(x, y) }
594func Xatanh(t *TLS, x float64) float64 { return math.Atanh(x) }
595func Xceil(t *TLS, x float64) float64 { return math.Ceil(x) }
596func Xceilf(t *TLS, x float32) float32 { return float32(math.Ceil(float64(x))) }
597func Xcopysign(t *TLS, x, y float64) float64 { return math.Copysign(x, y) }
598func Xcopysignf(t *TLS, x, y float32) float32 { return float32(math.Copysign(float64(x), float64(y))) }
599func Xcos(t *TLS, x float64) float64 { return math.Cos(x) }
600func Xcosf(t *TLS, x float32) float32 { return float32(math.Cos(float64(x))) }
601func Xcosh(t *TLS, x float64) float64 { return math.Cosh(x) }
602func Xexp(t *TLS, x float64) float64 { return math.Exp(x) }
603func Xfabs(t *TLS, x float64) float64 { return math.Abs(x) }
604func Xfabsf(t *TLS, x float32) float32 { return float32(math.Abs(float64(x))) }
605func Xfloor(t *TLS, x float64) float64 { return math.Floor(x) }
606func Xfmod(t *TLS, x, y float64) float64 { return math.Mod(x, y) }
607func Xhypot(t *TLS, x, y float64) float64 { return math.Hypot(x, y) }
608func Xisnan(t *TLS, x float64) int32 { return X__builtin_isnan(t, x) }
609func Xisnanf(t *TLS, x float32) int32 { return Bool32(math.IsNaN(float64(x))) }
610func Xisnanl(t *TLS, x float64) int32 { return Bool32(math.IsNaN(x)) } // ccgo has to handle long double as double as Go does not support long double.
611func Xldexp(t *TLS, x float64, exp int32) float64 { return math.Ldexp(x, int(exp)) }
612func Xlog(t *TLS, x float64) float64 { return math.Log(x) }
613func Xlog10(t *TLS, x float64) float64 { return math.Log10(x) }
614func Xlog2(t *TLS, x float64) float64 { return math.Log2(x) }
615func Xround(t *TLS, x float64) float64 { return math.Round(x) }
616func Xsin(t *TLS, x float64) float64 { return math.Sin(x) }
617func Xsinf(t *TLS, x float32) float32 { return float32(math.Sin(float64(x))) }
618func Xsinh(t *TLS, x float64) float64 { return math.Sinh(x) }
619func Xsqrt(t *TLS, x float64) float64 { return math.Sqrt(x) }
620func Xtan(t *TLS, x float64) float64 { return math.Tan(x) }
621func Xtanh(t *TLS, x float64) float64 { return math.Tanh(x) }
622func Xtrunc(t *TLS, x float64) float64 { return math.Trunc(x) }
623
624var nextRand = uint64(1)
625
626// int rand(void);
627func Xrand(t *TLS) int32 {
628 nextRand = nextRand*1103515245 + 12345
629 return int32(uint32(nextRand / (math.MaxUint32 + 1) % math.MaxInt32))
630}
631
632func Xpow(t *TLS, x, y float64) float64 {
633 r := math.Pow(x, y)
634 if x > 0 && r == 1 && y >= -1.0000000000000000715e-18 && y < -1e-30 {
635 r = 0.9999999999999999
636 }
637 return r
638}
639
640func Xfrexp(t *TLS, x float64, exp uintptr) float64 {
641 f, e := math.Frexp(x)
642 *(*int32)(unsafe.Pointer(exp)) = int32(e)
643 return f
644}
645
646func Xmodf(t *TLS, x float64, iptr uintptr) float64 {
647 i, f := math.Modf(x)
648 *(*float64)(unsafe.Pointer(iptr)) = i
649 return f
650}
651
652// char *strncpy(char *dest, const char *src, size_t n)
653func Xstrncpy(t *TLS, dest, src uintptr, n types.Size_t) (r uintptr) {
654 r = dest
655 for c := *(*int8)(unsafe.Pointer(src)); c != 0 && n > 0; n-- {
656 *(*int8)(unsafe.Pointer(dest)) = c
657 dest++
658 src++
659 c = *(*int8)(unsafe.Pointer(src))
660 }
661 for ; uintptr(n) > 0; n-- {
662 *(*int8)(unsafe.Pointer(dest)) = 0
663 dest++
664 }
665 return r
666}
667
668// char * __builtin___strncpy_chk (char *dest, const char *src, size_t n, size_t os);
669func X__builtin___strncpy_chk(t *TLS, dest, src uintptr, n, os types.Size_t) (r uintptr) {
670 if n != ^types.Size_t(0) && os < n {
671 Xabort(t)
672 }
673
674 return Xstrncpy(t, dest, src, n)
675}
676
677// int strcmp(const char *s1, const char *s2)
678func Xstrcmp(t *TLS, s1, s2 uintptr) int32 {
679 for {
680 ch1 := *(*byte)(unsafe.Pointer(s1))
681 s1++
682 ch2 := *(*byte)(unsafe.Pointer(s2))
683 s2++
684 if ch1 != ch2 || ch1 == 0 || ch2 == 0 {
685 return int32(ch1) - int32(ch2)
686 }
687 }
688}
689
690// size_t strlen(const char *s)
691func Xstrlen(t *TLS, s uintptr) (r types.Size_t) {
692 if s == 0 {
693 return 0
694 }
695
696 for ; *(*int8)(unsafe.Pointer(s)) != 0; s++ {
697 r++
698 }
699 return r
700}
701
702// char *strcat(char *dest, const char *src)
703func Xstrcat(t *TLS, dest, src uintptr) (r uintptr) {
704 r = dest
705 for *(*int8)(unsafe.Pointer(dest)) != 0 {
706 dest++
707 }
708 for {
709 c := *(*int8)(unsafe.Pointer(src))
710 src++
711 *(*int8)(unsafe.Pointer(dest)) = c
712 dest++
713 if c == 0 {
714 return r
715 }
716 }
717}
718
719// char * __builtin___strcat_chk (char *dest, const char *src, size_t os);
720func X__builtin___strcat_chk(t *TLS, dest, src uintptr, os types.Size_t) (r uintptr) {
721 return Xstrcat(t, dest, src)
722}
723
724// int strncmp(const char *s1, const char *s2, size_t n)
725func Xstrncmp(t *TLS, s1, s2 uintptr, n types.Size_t) int32 {
726 var ch1, ch2 byte
727 for ; n != 0; n-- {
728 ch1 = *(*byte)(unsafe.Pointer(s1))
729 s1++
730 ch2 = *(*byte)(unsafe.Pointer(s2))
731 s2++
732 if ch1 != ch2 {
733 return int32(ch1) - int32(ch2)
734 }
735
736 if ch1 == 0 {
737 return 0
738 }
739 }
740 return 0
741}
742
743// char *strcpy(char *dest, const char *src)
744func Xstrcpy(t *TLS, dest, src uintptr) (r uintptr) {
745 r = dest
746 // src0 := src
747 for ; ; dest++ {
748 c := *(*int8)(unsafe.Pointer(src))
749 src++
750 *(*int8)(unsafe.Pointer(dest)) = c
751 if c == 0 {
752 return r
753 }
754 }
755}
756
757// char *strchr(const char *s, int c)
758func Xstrchr(t *TLS, s uintptr, c int32) uintptr {
759 for {
760 ch2 := *(*byte)(unsafe.Pointer(s))
761 if ch2 == byte(c) {
762 return s
763 }
764
765 if ch2 == 0 {
766 return 0
767 }
768
769 s++
770 }
771}
772
773// char *strrchr(const char *s, int c)
774func Xstrrchr(t *TLS, s uintptr, c int32) (r uintptr) {
775 for {
776 ch2 := *(*byte)(unsafe.Pointer(s))
777 if ch2 == 0 {
778 return r
779 }
780
781 if ch2 == byte(c) {
782 r = s
783 }
784 s++
785 }
786}
787
788// void *memset(void *s, int c, size_t n)
789func Xmemset(t *TLS, s uintptr, c int32, n types.Size_t) uintptr {
790 if n != 0 {
791 c := byte(c & 0xff)
792
793 // On platforms where s is not already 8-byte aligned, clear the first
794 // few bytes one at a time until the pointer reaches alignment.
795 bytesBeforeAllignment := s % unsafe.Alignof(uint64(0))
796 if bytesBeforeAllignment > uintptr(n) {
797 bytesBeforeAllignment = uintptr(n)
798 }
799 b := (*RawMem)(unsafe.Pointer(s))[:bytesBeforeAllignment:bytesBeforeAllignment]
800 n -= types.Size_t(bytesBeforeAllignment)
801 for i := range b {
802 b[i] = c
803 }
804 if n >= 8 {
805 i64 := uint64(c) + uint64(c)<<8 + uint64(c)<<16 + uint64(c)<<24 + uint64(c)<<32 + uint64(c)<<40 + uint64(c)<<48 + uint64(c)<<56
806 b8 := (*RawMem64)(unsafe.Pointer(s + bytesBeforeAllignment))[: n/8 : n/8]
807 for i := range b8 {
808 b8[i] = i64
809 }
810 }
811 if n%8 != 0 {
812 b = (*RawMem)(unsafe.Pointer(s + bytesBeforeAllignment + uintptr(n-n%8)))[: n%8 : n%8]
813 for i := range b {
814 b[i] = c
815 }
816 }
817 }
818 return s
819}
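// In short: the unaligned head is cleared byte by byte, the aligned middle is
// filled with 64-bit stores of the replicated fill byte (c = 0xAB becomes the
// word 0xABABABABABABABAB), and the remaining tail bytes are written
// individually, which keeps the common large-n case on the fast word path.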
820
821// void *memcpy(void *dest, const void *src, size_t n);
822func Xmemcpy(t *TLS, dest, src uintptr, n types.Size_t) (r uintptr) {
823 if n != 0 {
824 copy((*RawMem)(unsafe.Pointer(dest))[:n:n], (*RawMem)(unsafe.Pointer(src))[:n:n])
825 }
826 return dest
827}
828
829// int memcmp(const void *s1, const void *s2, size_t n);
830func Xmemcmp(t *TLS, s1, s2 uintptr, n types.Size_t) int32 {
831 for ; n != 0; n-- {
832 c1 := *(*byte)(unsafe.Pointer(s1))
833 s1++
834 c2 := *(*byte)(unsafe.Pointer(s2))
835 s2++
836 if c1 < c2 {
837 return -1
838 }
839
840 if c1 > c2 {
841 return 1
842 }
843 }
844 return 0
845}
846
847// void *memchr(const void *s, int c, size_t n);
848func Xmemchr(t *TLS, s uintptr, c int32, n types.Size_t) uintptr {
849 for ; n != 0; n-- {
850 if *(*byte)(unsafe.Pointer(s)) == byte(c) {
851 return s
852 }
853
854 s++
855 }
856 return 0
857}
858
859// void *memmove(void *dest, const void *src, size_t n);
860func Xmemmove(t *TLS, dest, src uintptr, n types.Size_t) uintptr {
861 if n == 0 {
862 return dest
863 }
864
865 copy((*RawMem)(unsafe.Pointer(uintptr(dest)))[:n:n], (*RawMem)(unsafe.Pointer(uintptr(src)))[:n:n])
866 return dest
867}
868
869// void * __builtin___memmove_chk (void *dest, const void *src, size_t n, size_t os);
870func X__builtin___memmove_chk(t *TLS, dest, src uintptr, n, os types.Size_t) uintptr {
871 if os != ^types.Size_t(0) && os < n {
872 Xabort(t)
873 }
874
875 return Xmemmove(t, dest, src, n)
876}
877
878// char *getenv(const char *name);
879func Xgetenv(t *TLS, name uintptr) uintptr {
880 return getenv(Environ(), GoString(name))
881}
882
883func getenv(p uintptr, nm string) uintptr {
884 for ; ; p += uintptrSize {
885 q := *(*uintptr)(unsafe.Pointer(p))
886 if q == 0 {
887 return 0
888 }
889
890 s := GoString(q)
891 a := strings.SplitN(s, "=", 2)
892 if len(a) != 2 {
893 panic(todo("%q %q %q", nm, s, a))
894 }
895
896 if a[0] == nm {
897 return q + uintptr(len(nm)) + 1
898 }
899 }
900}
901
902// char *strstr(const char *haystack, const char *needle);
903func Xstrstr(t *TLS, haystack, needle uintptr) uintptr {
904 hs := GoString(haystack)
905 nd := GoString(needle)
906 if i := strings.Index(hs, nd); i >= 0 {
907 r := haystack + uintptr(i)
908 return r
909 }
910
911 return 0
912}
913
914// int putc(int c, FILE *stream);
915func Xputc(t *TLS, c int32, fp uintptr) int32 {
916 return Xfputc(t, c, fp)
917}
918
919// int atoi(const char *nptr);
920func Xatoi(t *TLS, nptr uintptr) int32 {
921 _, neg, _, n, _ := strToUint64(t, nptr, 10)
922 switch {
923 case neg:
924 return int32(-n)
925 default:
926 return int32(n)
927 }
928}
929
930// double atof(const char *nptr);
931func Xatof(t *TLS, nptr uintptr) float64 {
932 n, _ := strToFloatt64(t, nptr, 64)
933 // if dmesgs {
934 // dmesg("%v: %q: %v", origin(1), GoString(nptr), n)
935 // }
936 return n
937}
938
939// int tolower(int c);
940func Xtolower(t *TLS, c int32) int32 {
941 if c >= 'A' && c <= 'Z' {
942 return c + ('a' - 'A')
943 }
944
945 return c
946}
947
948// int toupper(int c);
949func Xtoupper(t *TLS, c int32) int32 {
950 if c >= 'a' && c <= 'z' {
951 return c - ('a' - 'A')
952 }
953
954 return c
955}
956
957// int isatty(int fd);
958func Xisatty(t *TLS, fd int32) int32 {
959 return Bool32(isatty.IsTerminal(uintptr(fd)))
960}
961
962// long atol(const char *nptr);
963func Xatol(t *TLS, nptr uintptr) long {
964 _, neg, _, n, _ := strToUint64(t, nptr, 10)
965 switch {
966 case neg:
967 return long(-n)
968 default:
969 return long(n)
970 }
971}
972
973// time_t mktime(struct tm *tm);
974func Xmktime(t *TLS, ptm uintptr) time.Time_t {
975 loc := gotime.Local
976 if r := getenv(Environ(), "TZ"); r != 0 {
977 zone, off := parseZone(GoString(r))
978 loc = gotime.FixedZone(zone, off)
979 }
980 tt := gotime.Date(
981 int((*time.Tm)(unsafe.Pointer(ptm)).Ftm_year+1900),
982 gotime.Month((*time.Tm)(unsafe.Pointer(ptm)).Ftm_mon+1),
983 int((*time.Tm)(unsafe.Pointer(ptm)).Ftm_mday),
984 int((*time.Tm)(unsafe.Pointer(ptm)).Ftm_hour),
985 int((*time.Tm)(unsafe.Pointer(ptm)).Ftm_min),
986 int((*time.Tm)(unsafe.Pointer(ptm)).Ftm_sec),
987 0,
988 loc,
989 )
990 (*time.Tm)(unsafe.Pointer(ptm)).Ftm_wday = int32(tt.Weekday())
991 (*time.Tm)(unsafe.Pointer(ptm)).Ftm_yday = int32(tt.YearDay() - 1)
992 return time.Time_t(tt.Unix())
993}
994
995// char *strpbrk(const char *s, const char *accept);
996func Xstrpbrk(t *TLS, s, accept uintptr) uintptr {
997 bits := newBits(256)
998 for {
999 b := *(*byte)(unsafe.Pointer(accept))
1000 if b == 0 {
1001 break
1002 }
1003
1004 bits.set(int(b))
1005 accept++
1006 }
1007 for {
1008 b := *(*byte)(unsafe.Pointer(s))
1009 if b == 0 {
1010 return 0
1011 }
1012
1013 if bits.has(int(b)) {
1014 return s
1015 }
1016
1017 s++
1018 }
1019}
1020
1021// int strcasecmp(const char *s1, const char *s2);
1022func Xstrcasecmp(t *TLS, s1, s2 uintptr) int32 {
1023 for {
1024 ch1 := *(*byte)(unsafe.Pointer(s1))
1025 if ch1 >= 'a' && ch1 <= 'z' {
1026 ch1 = ch1 - ('a' - 'A')
1027 }
1028 s1++
1029 ch2 := *(*byte)(unsafe.Pointer(s2))
1030 if ch2 >= 'a' && ch2 <= 'z' {
1031 ch2 = ch2 - ('a' - 'A')
1032 }
1033 s2++
1034 if ch1 != ch2 || ch1 == 0 || ch2 == 0 {
1035 r := int32(ch1) - int32(ch2)
1036 return r
1037 }
1038 }
1039}
1040
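// uint16_t ntohs(uint16_t netshort);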
1041func Xntohs(t *TLS, netshort uint16) uint16 {
1042 return uint16((*[2]byte)(unsafe.Pointer(&netshort))[0])<<8 | uint16((*[2]byte)(unsafe.Pointer(&netshort))[1])
1043}
1044
1045// uint16_t htons(uint16_t hostshort);
1046func Xhtons(t *TLS, hostshort uint16) uint16 {
1047 var a [2]byte
1048 a[0] = byte(hostshort >> 8)
1049 a[1] = byte(hostshort)
1050 return *(*uint16)(unsafe.Pointer(&a))
1051}
1052
1053// uint32_t htonl(uint32_t hostlong);
1054func Xhtonl(t *TLS, hostlong uint32) uint32 {
1055 var a [4]byte
1056 a[0] = byte(hostlong >> 24)
1057 a[1] = byte(hostlong >> 16)
1058 a[2] = byte(hostlong >> 8)
1059 a[3] = byte(hostlong)
1060 return *(*uint32)(unsafe.Pointer(&a))
1061}
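// Worked example of the conversions above: on a little-endian host,
// Xhtons(0x1234) stores the bytes {0x12, 0x34} and reinterprets them as the
// host value 0x3412; on a big-endian host the same bytes read back as 0x1234,
// so the functions are no-ops there, matching htons/htonl/ntohs semantics.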
1062
1063// FILE *fopen(const char *pathname, const char *mode);
1064func Xfopen(t *TLS, pathname, mode uintptr) uintptr {
1065 return Xfopen64(t, pathname, mode) //TODO 32 bit
1066}
1067
1068func Dmesg(s string, args ...interface{}) {
1069 if dmesgs {
1070 dmesg(s, args...)
1071 }
1072}
1073
1074// void sqlite3_log(int iErrCode, const char *zFormat, ...);
1075func X__ccgo_sqlite3_log(t *TLS, iErrCode int32, zFormat uintptr, args uintptr) {
1076 // if dmesgs {
1077 // dmesg("%v: iErrCode: %v, msg: %s\n%s", origin(1), iErrCode, printf(zFormat, args), debug.Stack())
1078 // }
1079}
1080
1081// int _IO_putc(int __c, _IO_FILE *__fp);
1082func X_IO_putc(t *TLS, c int32, fp uintptr) int32 {
1083 return Xputc(t, c, fp)
1084}
1085
1086// int atexit(void (*function)(void));
1087func Xatexit(t *TLS, function uintptr) int32 {
1088 AtExit(func() {
1089 (*struct{ f func(*TLS) })(unsafe.Pointer(&struct{ uintptr }{function})).f(t)
1090 })
1091 return 0
1092}
1093
1094// int vasprintf(char **strp, const char *fmt, va_list ap);
1095func Xvasprintf(t *TLS, strp, fmt, ap uintptr) int32 {
1096 panic(todo(""))
1097}
1098
1099func AtomicLoadInt32(addr *int32) (val int32) { return atomic.LoadInt32(addr) }
1100func AtomicLoadInt64(addr *int64) (val int64) { return atomic.LoadInt64(addr) }
1101func AtomicLoadUint32(addr *uint32) (val uint32) { return atomic.LoadUint32(addr) }
1102func AtomicLoadUint64(addr *uint64) (val uint64) { return atomic.LoadUint64(addr) }
1103func AtomicLoadUintptr(addr *uintptr) (val uintptr) { return atomic.LoadUintptr(addr) }
1104
1105func AtomicLoadFloat32(addr *float32) (val float32) {
1106 return math.Float32frombits(atomic.LoadUint32((*uint32)(unsafe.Pointer(addr))))
1107}
1108
1109func AtomicLoadFloat64(addr *float64) (val float64) {
1110 return math.Float64frombits(atomic.LoadUint64((*uint64)(unsafe.Pointer(addr))))
1111}
1112
1113func AtomicLoadPInt32(addr uintptr) (val int32) {
1114 return atomic.LoadInt32((*int32)(unsafe.Pointer(addr)))
1115}
1116
1117func AtomicLoadPInt64(addr uintptr) (val int64) {
1118 return atomic.LoadInt64((*int64)(unsafe.Pointer(addr)))
1119}
1120
1121func AtomicLoadPUint32(addr uintptr) (val uint32) {
1122 return atomic.LoadUint32((*uint32)(unsafe.Pointer(addr)))
1123}
1124
1125func AtomicLoadPUint64(addr uintptr) (val uint64) {
1126 return atomic.LoadUint64((*uint64)(unsafe.Pointer(addr)))
1127}
1128
1129func AtomicLoadPUintptr(addr uintptr) (val uintptr) {
1130 return atomic.LoadUintptr((*uintptr)(unsafe.Pointer(addr)))
1131}
1132
1133func AtomicLoadPFloat32(addr uintptr) (val float32) {
1134 return math.Float32frombits(atomic.LoadUint32((*uint32)(unsafe.Pointer(addr))))
1135}
1136
1137func AtomicLoadPFloat64(addr uintptr) (val float64) {
1138 return math.Float64frombits(atomic.LoadUint64((*uint64)(unsafe.Pointer(addr))))
1139}
1140
1141func AtomicStoreInt32(addr *int32, val int32) { atomic.StoreInt32(addr, val) }
1142func AtomicStoreInt64(addr *int64, val int64) { atomic.StoreInt64(addr, val) }
1143func AtomicStoreUint32(addr *uint32, val uint32) { atomic.StoreUint32(addr, val) }
1144func AtomicStoreUint64(addr *uint64, val uint64) { atomic.StoreUint64(addr, val) }
1145func AtomicStoreUintptr(addr *uintptr, val uintptr) { atomic.StoreUintptr(addr, val) }
1146
1147func AtomicStoreFloat32(addr *float32, val float32) {
1148 atomic.StoreUint32((*uint32)(unsafe.Pointer(addr)), math.Float32bits(val))
1149}
1150
1151func AtomicStoreFloat64(addr *float64, val float64) {
1152 atomic.StoreUint64((*uint64)(unsafe.Pointer(addr)), math.Float64bits(val))
1153}
1154
1155func AtomicStorePInt32(addr uintptr, val int32) {
1156 atomic.StoreInt32((*int32)(unsafe.Pointer(addr)), val)
1157}
1158
1159func AtomicStorePInt64(addr uintptr, val int64) {
1160 atomic.StoreInt64((*int64)(unsafe.Pointer(addr)), val)
1161}
1162
1163func AtomicStorePUint32(addr uintptr, val uint32) {
1164 atomic.StoreUint32((*uint32)(unsafe.Pointer(addr)), val)
1165}
1166
1167func AtomicStorePUint64(addr uintptr, val uint64) {
1168 atomic.StoreUint64((*uint64)(unsafe.Pointer(addr)), val)
1169}
1170
1171func AtomicStorePUintptr(addr uintptr, val uintptr) {
1172 atomic.StoreUintptr((*uintptr)(unsafe.Pointer(addr)), val)
1173}
1174
1175func AtomicStorePFloat32(addr uintptr, val float32) {
1176 atomic.StoreUint32((*uint32)(unsafe.Pointer(addr)), math.Float32bits(val))
1177}
1178
1179func AtomicStorePFloat64(addr uintptr, val float64) {
1180 atomic.StoreUint64((*uint64)(unsafe.Pointer(addr)), math.Float64bits(val))
1181}
1182
1183func AtomicAddInt32(addr *int32, delta int32) (new int32) { return atomic.AddInt32(addr, delta) }
1184func AtomicAddInt64(addr *int64, delta int64) (new int64) { return atomic.AddInt64(addr, delta) }
1185func AtomicAddUint32(addr *uint32, delta uint32) (new uint32) { return atomic.AddUint32(addr, delta) }
1186func AtomicAddUint64(addr *uint64, delta uint64) (new uint64) { return atomic.AddUint64(addr, delta) }
1187
1188func AtomicAddUintptr(addr *uintptr, delta uintptr) (new uintptr) {
1189 return atomic.AddUintptr(addr, delta)
1190
1191}
1192
1193func AtomicAddFloat32(addr *float32, delta float32) (new float32) {
1194 v := AtomicLoadFloat32(addr) + delta
1195 AtomicStoreFloat32(addr, v)
1196 return v
1197}
1198
1199func AtomicAddFloat64(addr *float64, delta float64) (new float64) {
1200 v := AtomicLoadFloat64(addr) + delta
1201 AtomicStoreFloat64(addr, v)
1202 return v
1203}
1204
1205// size_t mbstowcs(wchar_t *dest, const char *src, size_t n);
1206func Xmbstowcs(t *TLS, dest, src uintptr, n types.Size_t) types.Size_t {
1207 panic(todo(""))
1208}
1209
1210// int mbtowc(wchar_t *pwc, const char *s, size_t n);
1211func Xmbtowc(t *TLS, pwc, s uintptr, n types.Size_t) int32 {
1212 panic(todo(""))
1213}
1214
1215// size_t __ctype_get_mb_cur_max(void);
1216func X__ctype_get_mb_cur_max(t *TLS) types.Size_t {
1217 panic(todo(""))
1218}
1219
1220// int wctomb(char *s, wchar_t wc);
1221func Xwctomb(t *TLS, s uintptr, wc wchar_t) int32 {
1222 panic(todo(""))
1223}
1224
1225// int mblen(const char *s, size_t n);
1226func Xmblen(t *TLS, s uintptr, n types.Size_t) int32 {
1227 panic(todo(""))
1228}
1229
1230// ssize_t readv(int fd, const struct iovec *iov, int iovcnt);
1231func Xreadv(t *TLS, fd int32, iov uintptr, iovcnt int32) types.Ssize_t {
1232 panic(todo(""))
1233}
1234
1235// int openpty(int *amaster, int *aslave, char *name,
1236//
1237// const struct termios *termp,
1238// const struct winsize *winp);
1239func Xopenpty(t *TLS, amaster, aslave, name, termp, winp uintptr) int32 {
1240 panic(todo(""))
1241}
1242
1243// pid_t setsid(void);
1244func Xsetsid(t *TLS) types.Pid_t {
1245 panic(todo(""))
1246}
1247
1248// int pselect(int nfds, fd_set *readfds, fd_set *writefds,
1249//
1250// fd_set *exceptfds, const struct timespec *timeout,
1251// const sigset_t *sigmask);
1252func Xpselect(t *TLS, nfds int32, readfds, writefds, exceptfds, timeout, sigmask uintptr) int32 {
1253 panic(todo(""))
1254}
1255
1256// int kill(pid_t pid, int sig);
1257func Xkill(t *TLS, pid types.Pid_t, sig int32) int32 {
1258 panic(todo(""))
1259}
1260
1261// int tcsendbreak(int fd, int duration);
1262func Xtcsendbreak(t *TLS, fd, duration int32) int32 {
1263 panic(todo(""))
1264}
1265
1266// int wcwidth(wchar_t c);
1267func Xwcwidth(t *TLS, c wchar_t) int32 {
1268 panic(todo(""))
1269}
1270
1271// int clock_gettime(clockid_t clk_id, struct timespec *tp);
1272func Xclock_gettime(t *TLS, clk_id int32, tp uintptr) int32 {
1273 panic(todo(""))
1274}
1275
1276// AtExit will attempt to run f at process exit. Execution is not
1277// guaranteed, nor is its ordering with respect to any other handlers
1278// registered by AtExit.
1279func AtExit(f func()) {
1280 atExitMu.Lock()
1281 atExit = append(atExit, f)
1282 atExitMu.Unlock()
1283}
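// A usage sketch; the handler below is illustrative only:
//
//	libc.AtExit(func() { fmt.Println("bye") })
//
// As currently implemented, exit runs the registered handlers in registration
// order after the optional coverage reports, but callers should not depend on
// more than the doc comment above promises.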
1284
1285func X__ccgo_dmesg(t *TLS, fmt uintptr, va uintptr) {
1286 if dmesgs {
1287 dmesg("%s", printf(fmt, va))
1288 }
1289}
1290
1291// int getentropy(void *buffer, size_t length);
1292//
1293// The getentropy() function writes length bytes of high-quality random data
1294// to the buffer starting at the location pointed to by buffer. The maximum
1295// permitted value for the length argument is 256.
1296func Xgetentropy(t *TLS, buffer uintptr, length size_t) int32 {
1297 const max = 256
1298 switch {
1299 case length == 0:
1300 return 0
1301 case buffer == 0:
1302 t.setErrno(errno.EFAULT)
1303 return -1
1304 case length > max:
1305 t.setErrno(errno.EIO)
1306 return -1
1307 }
1308
1309 if _, err := crand.Read((*RawMem)(unsafe.Pointer(buffer))[:length]); err != nil {
1310 t.setErrno(errno.EIO)
1311 return -1
1312 }
1313
1314 return 0
1315}
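// A hedged usage sketch; buf is a hypothetical allocation made elsewhere:
//
//	buf := Xmalloc(t, 32)
//	if Xgetentropy(t, buf, 32) != 0 {
//		// the TLS errno was set to EFAULT or EIO, as with getentropy(3)
//	}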
1316
1317// void * reallocarray(void *ptr, size_t nmemb, size_t size);
1318func Xreallocarray(t *TLS, ptr uintptr, nmemb, size size_t) uintptr {
1319 hi, lo := mathutil.MulUint128_64(uint64(nmemb), uint64(size))
1320 if hi != 0 || lo > uint64(unsafe.Sizeof(RawMem{})) {
1321 t.setErrno(errno.ENOMEM)
1322 return 0
1323 }
1324
1325 return Xrealloc(t, ptr, size_t(lo))
1326}
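// The nmemb*size product is formed in 128 bits via mathutil.MulUint128_64; a
// non-zero high word, or a low word larger than the biggest slice RawMem can
// describe, means the size_t multiplication would overflow, so the call fails
// with ENOMEM instead of allocating a too-small block, per reallocarray(3).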
1327
1328// int setjmp(jmp_buf env);
1329func Xsetjmp(t *TLS, env uintptr) int32 {
1330 return 0 //TODO
1331}
1332
1333// void longjmp(jmp_buf env, int val);
1334func Xlongjmp(t *TLS, env uintptr, val int32) {
1335 panic(todo(""))
1336}
1337
1338// https://linux.die.net/man/3/_setjmp
1339//
1340// The _longjmp() and _setjmp() functions shall be equivalent to longjmp() and
1341// setjmp(), respectively, with the additional restriction that _longjmp() and
1342// _setjmp() shall not manipulate the signal mask.
1343
1344// int _setjmp(jmp_buf env);
1345func X_setjmp(t *TLS, env uintptr) int32 {
1346 return 0 //TODO
1347}
1348
1349// void _longjmp(jmp_buf env, int val);
1350func X_longjmp(t *TLS, env uintptr, val int32) {
1351 panic(todo(""))
1352}
1353
1354// unsigned __sync_add_and_fetch_uint32(*unsigned, unsigned)
1355func X__sync_add_and_fetch_uint32(t *TLS, p uintptr, v uint32) uint32 {
1356 return atomic.AddUint32((*uint32)(unsafe.Pointer(p)), v)
1357}
1358
1359// unsigned __sync_sub_and_fetch_uint32(*unsigned, unsigned)
1360func X__sync_sub_and_fetch_uint32(t *TLS, p uintptr, v uint32) uint32 {
1361 return atomic.AddUint32((*uint32)(unsafe.Pointer(p)), -v)
1362}
1363
1364// int sched_yield(void);
1365func Xsched_yield(t *TLS) {
1366 runtime.Gosched()
1367}
1368
1369// int getc(FILE *stream);
1370func Xgetc(t *TLS, stream uintptr) int32 {
1371 return Xfgetc(t, stream)
1372}
1373
1374// char *fgets(char *s, int size, FILE *stream);
1375func Xfgets(t *TLS, s uintptr, size int32, stream uintptr) uintptr {
1376 var b []byte
1377out:
1378 for ; size > 0; size-- {
1379 switch c := Xfgetc(t, stream); c {
1380 case '\n':
1381 b = append(b, byte(c))
1382 break out
1383 case stdio.EOF:
1384 break out
1385 default:
1386 b = append(b, byte(c))
1387 }
1388 }
1389 if len(b) == 0 {
1390 return 0
1391 }
1392
1393 b = append(b, 0)
1394 copy((*RawMem)(unsafe.Pointer(s))[:len(b):len(b)], b)
1395 return s
1396}
1397
1398// void bzero(void *s, size_t n);
1399func Xbzero(t *TLS, s uintptr, n types.Size_t) {
1400 b := (*RawMem)(unsafe.Pointer(s))[:n]
1401 for i := range b {
1402 b[i] = 0
1403 }
1404}
1405
1406// char *rindex(const char *s, int c);
1407func Xrindex(t *TLS, s uintptr, c int32) uintptr {
1408 if s == 0 {
1409 return 0
1410 }
1411
1412 var r uintptr
1413 for {
1414 c2 := int32(*(*byte)(unsafe.Pointer(s)))
1415 if c2 == c {
1416 r = s
1417 }
1418
1419 if c2 == 0 {
1420 return r
1421 }
1422
1423 s++
1424 }
1425}
1426
1427// int isascii(int c);
1428func Xisascii(t *TLS, c int32) int32 {
1429 return Bool32(c >= 0 && c <= 0x7f)
1430}
1431
1432func X__builtin_isunordered(t *TLS, a, b float64) int32 {
1433 return Bool32(math.IsNaN(a) || math.IsNaN(b))
1434}