// Copyright 2017 The Memory Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package memory implements a memory allocator.
//
// Build status
//
// available at https://modern-c.appspot.com/-/builder/?importpath=modernc.org%2fmemory
//
// Changelog
//
// 2017-10-03 Added alternative, unsafe.Pointer-based API.
//
// Benchmarks
//
// AMD Ryzen 9 3900X 12-Core Processor × 24
//
// jnml@3900x:~/src/modernc.org/memory$ date ; go version ; go test -run @ -bench . -benchmem |& tee log
// Fri Nov 20 17:23:04 CET 2020
// go version go1.15.5 linux/amd64
// goos: linux
// goarch: amd64
// pkg: modernc.org/memory
// BenchmarkFree16-24	141188362	8.26 ns/op	0 B/op	0 allocs/op
// BenchmarkFree32-24	100000000	11.4 ns/op	0 B/op	0 allocs/op
// BenchmarkFree64-24	67160647	18.3 ns/op	0 B/op	0 allocs/op
// BenchmarkCalloc16-24	60612698	19.8 ns/op	0 B/op	0 allocs/op
// BenchmarkCalloc32-24	47968105	23.8 ns/op	0 B/op	0 allocs/op
// BenchmarkCalloc64-24	40752181	28.6 ns/op	0 B/op	0 allocs/op
// BenchmarkGoCalloc16-24	66487354	17.8 ns/op	16 B/op	1 allocs/op
// BenchmarkGoCalloc32-24	56009206	21.2 ns/op	32 B/op	1 allocs/op
// BenchmarkGoCalloc64-24	52086571	23.4 ns/op	64 B/op	1 allocs/op
// BenchmarkMalloc16-24	113943390	10.2 ns/op	0 B/op	0 allocs/op
// BenchmarkMalloc32-24	113520471	10.2 ns/op	0 B/op	0 allocs/op
// BenchmarkMalloc64-24	108787056	10.7 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrFree16-24	146110286	7.94 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrFree32-24	93052707	12.0 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrFree64-24	69805262	17.3 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrCalloc16-24	85282725	13.7 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrCalloc32-24	66489789	17.9 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrCalloc64-24	53561092	22.7 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrMalloc16-24	222978858	5.28 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrMalloc32-24	210443384	5.30 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrMalloc64-24	213706227	5.47 ns/op	0 B/op	0 allocs/op
// PASS
// ok	modernc.org/memory	70.528s
// jnml@3900x:~/src/modernc.org/memory$
//
// Intel® Core™ i5-4670 CPU @ 3.40GHz × 4
//
// ==== jnml@4670:~/src/modernc.org/memory> date ; go version ; go test -run @ -bench . -benchmem |& tee log
// Sat Dec 8 12:56:53 CET 2018
// go version go1.11.2 linux/amd64
// goos: linux
// goarch: amd64
// pkg: modernc.org/memory
// BenchmarkFree16-4	100000000	14.7 ns/op	0 B/op	0 allocs/op
// BenchmarkFree32-4	100000000	20.5 ns/op	0 B/op	0 allocs/op
// BenchmarkFree64-4	50000000	32.8 ns/op	0 B/op	0 allocs/op
// BenchmarkCalloc16-4	50000000	24.4 ns/op	0 B/op	0 allocs/op
// BenchmarkCalloc32-4	50000000	29.2 ns/op	0 B/op	0 allocs/op
// BenchmarkCalloc64-4	50000000	35.7 ns/op	0 B/op	0 allocs/op
// BenchmarkGoCalloc16-4	50000000	27.0 ns/op	16 B/op	1 allocs/op
// BenchmarkGoCalloc32-4	50000000	27.3 ns/op	32 B/op	1 allocs/op
// BenchmarkGoCalloc64-4	30000000	37.9 ns/op	64 B/op	1 allocs/op
// BenchmarkMalloc16-4	100000000	12.9 ns/op	0 B/op	0 allocs/op
// BenchmarkMalloc32-4	100000000	12.9 ns/op	0 B/op	0 allocs/op
// BenchmarkMalloc64-4	100000000	13.2 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrFree16-4	100000000	12.0 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrFree32-4	100000000	17.5 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrFree64-4	50000000	28.9 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrCalloc16-4	100000000	17.8 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrCalloc32-4	100000000	22.9 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrCalloc64-4	50000000	29.6 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrMalloc16-4	200000000	7.31 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrMalloc32-4	200000000	7.47 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrMalloc64-4	200000000	7.68 ns/op	0 B/op	0 allocs/op
// PASS
// ok	modernc.org/memory	73.859s
//
// Intel® Xeon(R) CPU E5-1650 v2 @ 3.50GHz × 12
//
// ==== jnml@e5-1650:~/src/modernc.org/memory> date ; go version ; go test -run @ -bench . -benchmem
// Fri Dec 7 14:18:50 CET 2018
// go version go1.11.2 linux/amd64
// goos: linux
// goarch: amd64
// pkg: modernc.org/memory
// BenchmarkFree16-12	100000000	16.7 ns/op	0 B/op	0 allocs/op
// BenchmarkFree32-12	50000000	25.0 ns/op	0 B/op	0 allocs/op
// BenchmarkFree64-12	30000000	39.7 ns/op	0 B/op	0 allocs/op
// BenchmarkCalloc16-12	50000000	26.3 ns/op	0 B/op	0 allocs/op
// BenchmarkCalloc32-12	50000000	33.4 ns/op	0 B/op	0 allocs/op
// BenchmarkCalloc64-12	30000000	38.3 ns/op	0 B/op	0 allocs/op
// BenchmarkGoCalloc16-12	50000000	26.6 ns/op	16 B/op	1 allocs/op
// BenchmarkGoCalloc32-12	50000000	26.8 ns/op	32 B/op	1 allocs/op
// BenchmarkGoCalloc64-12	30000000	35.1 ns/op	64 B/op	1 allocs/op
// BenchmarkMalloc16-12	100000000	13.5 ns/op	0 B/op	0 allocs/op
// BenchmarkMalloc32-12	100000000	13.4 ns/op	0 B/op	0 allocs/op
// BenchmarkMalloc64-12	100000000	14.1 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrFree16-12	100000000	14.4 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrFree32-12	100000000	21.7 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrFree64-12	50000000	36.7 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrCalloc16-12	100000000	20.4 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrCalloc32-12	50000000	27.1 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrCalloc64-12	50000000	33.4 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrMalloc16-12	200000000	8.02 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrMalloc32-12	200000000	8.28 ns/op	0 B/op	0 allocs/op
// BenchmarkUintptrMalloc64-12	200000000	8.29 ns/op	0 B/op	0 allocs/op
// PASS
// ok	modernc.org/memory	80.896s
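//
// Usage
//
// A minimal usage sketch (illustrative only, not part of the original
// documentation); the zero value of Allocator is ready for use:
//
//	package main
//
//	import "modernc.org/memory"
//
//	func main() {
//		var a memory.Allocator
//
//		b, err := a.Malloc(64) // []byte of len 64 backed by mmap'd memory
//		if err != nil {
//			panic(err)
//		}
//
//		copy(b, "hello")
//		if err := a.Free(b); err != nil {
//			panic(err)
//		}
//
//		// Optionally release all remaining OS resources held by the allocator.
//		if err := a.Close(); err != nil {
//			panic(err)
//		}
//	}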
package memory // import "modernc.org/memory"

import (
	"fmt"
	"math/bits"
	"os"
	"reflect"
	"unsafe"
)

const (
	headerSize     = unsafe.Sizeof(page{})
	mallocAllign   = 2 * unsafe.Sizeof(uintptr(0))
	maxSlotSize    = 1 << maxSlotSizeLog
	maxSlotSizeLog = pageSizeLog - 2
	pageAvail      = pageSize - headerSize
	pageMask       = pageSize - 1
	pageSize       = 1 << pageSizeLog
)

func init() {
	if unsafe.Sizeof(page{})%mallocAllign != 0 {
		panic("internal error")
	}
}

// if n%m != 0 { n += m-n%m }. m must be a power of 2.
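// For example, with m == 8: roundup(5, 8) == 8, roundup(8, 8) == 8 and
// roundup(9, 8) == 16.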
func roundup(n, m int) int { return (n + m - 1) &^ (m - 1) }

type node struct {
	prev, next uintptr // *node
}

type page struct {
	brk  int
	log  uint
	size int
	used int
}

// Allocator allocates and frees memory. Its zero value is ready for use. The
// exported counters are updated only when build tag memory.counters is
// present.
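// For example (illustrative invocation using the standard Go toolchain):
//
//	go build -tags memory.counters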
type Allocator struct {
	Allocs int                  // # of allocs.
	Bytes  int                  // Asked from OS.
	cap    [64]int
	lists  [64]uintptr          // *node
	Mmaps  int                  // Asked from OS.
	pages  [64]uintptr          // *page
	regs   map[uintptr]struct{} // map[*page]struct{}
}

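// mmap maps a new memory region of at least size bytes, records the actual
// size in the region's page header and registers the region in a.regs.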
func (a *Allocator) mmap(size int) (uintptr /* *page */, error) {
	p, size, err := mmap(size)
	if err != nil {
		return 0, err
	}

	if counters {
		a.Mmaps++
		a.Bytes += size
	}
	if a.regs == nil {
		a.regs = map[uintptr]struct{}{}
	}
	(*page)(unsafe.Pointer(p)).size = size
	a.regs[p] = struct{}{}
	return p, nil
}

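// newPage maps a dedicated page for a single large allocation. Such pages are
// marked with log == 0.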
func (a *Allocator) newPage(size int) (uintptr /* *page */, error) {
	size += int(headerSize)
	p, err := a.mmap(size)
	if err != nil {
		return 0, err
	}

	(*page)(unsafe.Pointer(p)).log = 0
	return p, nil
}

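// newSharedPage maps a page shared by allocations of slot size 1<<log and
// records how many slots fit in it (a.cap[log]).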
func (a *Allocator) newSharedPage(log uint) (uintptr /* *page */, error) {
	if a.cap[log] == 0 {
		a.cap[log] = int(pageAvail) / (1 << log)
	}
	size := int(headerSize) + a.cap[log]<<log
	p, err := a.mmap(size)
	if err != nil {
		return 0, err
	}

	a.pages[log] = p
	(*page)(unsafe.Pointer(p)).log = log
	return p, nil
}

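// unmap unregisters the page at p and returns its memory to the OS.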
func (a *Allocator) unmap(p uintptr /* *page */) error {
	delete(a.regs, p)
	if counters {
		a.Mmaps--
	}
	return unmap(p, (*page)(unsafe.Pointer(p)).size)
}

// UintptrCalloc is like Calloc except it returns an uintptr.
func (a *Allocator) UintptrCalloc(size int) (r uintptr, err error) {
	if trace {
		defer func() {
			fmt.Fprintf(os.Stderr, "Calloc(%#x) %#x, %v\n", size, r, err)
		}()
	}
	if r, err = a.UintptrMalloc(size); r == 0 || err != nil {
		return 0, err
	}
	b := ((*rawmem)(unsafe.Pointer(r)))[:size:size]
	for i := range b {
		b[i] = 0
	}
	return r, nil
}

// UintptrFree is like Free except its argument is an uintptr, which must have
// been acquired from UintptrCalloc or UintptrMalloc or UintptrRealloc.
func (a *Allocator) UintptrFree(p uintptr) (err error) {
	if trace {
		defer func() {
			fmt.Fprintf(os.Stderr, "Free(%#x) %v\n", p, err)
		}()
	}
	if p == 0 {
		return nil
	}

	if counters {
		a.Allocs--
	}
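	// p belongs to a region whose page header sits at its pageSize-aligned
	// base; recover the header by masking off the low pageMask bits.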
	pg := p &^ uintptr(pageMask)
	log := (*page)(unsafe.Pointer(pg)).log
	if log == 0 {
		if counters {
			a.Bytes -= (*page)(unsafe.Pointer(pg)).size
		}
		return a.unmap(pg)
	}

	(*node)(unsafe.Pointer(p)).prev = 0
	(*node)(unsafe.Pointer(p)).next = a.lists[log]
	if next := (*node)(unsafe.Pointer(p)).next; next != 0 {
		(*node)(unsafe.Pointer(next)).prev = p
	}
	a.lists[log] = p
	(*page)(unsafe.Pointer(pg)).used--
	if (*page)(unsafe.Pointer(pg)).used != 0 {
		return nil
	}

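	// The page is now completely unused: unlink all of its slots from the
	// free list, then return it to the OS.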
	for i := 0; i < (*page)(unsafe.Pointer(pg)).brk; i++ {
		n := pg + headerSize + uintptr(i)<<log
		next := (*node)(unsafe.Pointer(n)).next
		prev := (*node)(unsafe.Pointer(n)).prev
		switch {
		case prev == 0:
			a.lists[log] = next
			if next != 0 {
				(*node)(unsafe.Pointer(next)).prev = 0
			}
		case next == 0:
			(*node)(unsafe.Pointer(prev)).next = 0
		default:
			(*node)(unsafe.Pointer(prev)).next = next
			(*node)(unsafe.Pointer(next)).prev = prev
		}
	}

	if a.pages[log] == pg {
		a.pages[log] = 0
	}
	if counters {
		a.Bytes -= (*page)(unsafe.Pointer(pg)).size
	}
	return a.unmap(pg)
}

// UintptrMalloc is like Malloc except it returns an uintptr.
func (a *Allocator) UintptrMalloc(size int) (r uintptr, err error) {
	if trace {
		defer func() {
			fmt.Fprintf(os.Stderr, "Malloc(%#x) %#x, %v\n", size, r, err)
		}()
	}
	if size < 0 {
		panic("invalid malloc size")
	}

	if size == 0 {
		return 0, nil
	}

	if counters {
		a.Allocs++
	}
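	// Round size up to a multiple of mallocAllign and take the ceiling of
	// its base-2 logarithm: requests of up to maxSlotSize bytes are served
	// from shared pages split into 1<<log byte slots.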
	log := uint(bits.Len(uint((size+int(mallocAllign)-1)&^int(mallocAllign-1) - 1)))
	if log > maxSlotSizeLog {
		p, err := a.newPage(size)
		if err != nil {
			return 0, err
		}

		return p + headerSize, nil
	}

	if a.lists[log] == 0 && a.pages[log] == 0 {
		if _, err := a.newSharedPage(log); err != nil {
			return 0, err
		}
	}

	if p := a.pages[log]; p != 0 {
		(*page)(unsafe.Pointer(p)).used++
		(*page)(unsafe.Pointer(p)).brk++
		if (*page)(unsafe.Pointer(p)).brk == a.cap[log] {
			a.pages[log] = 0
		}
		return p + headerSize + uintptr((*page)(unsafe.Pointer(p)).brk-1)<<log, nil
	}

	n := a.lists[log]
	p := n &^ uintptr(pageMask)
	a.lists[log] = (*node)(unsafe.Pointer(n)).next
	if next := (*node)(unsafe.Pointer(n)).next; next != 0 {
		(*node)(unsafe.Pointer(next)).prev = 0
	}
	(*page)(unsafe.Pointer(p)).used++
	return n, nil
}

// UintptrRealloc is like Realloc except its first argument is an uintptr,
// which must have been returned from UintptrCalloc, UintptrMalloc or
// UintptrRealloc.
func (a *Allocator) UintptrRealloc(p uintptr, size int) (r uintptr, err error) {
	if trace {
		defer func() {
			fmt.Fprintf(os.Stderr, "UnsafeRealloc(%#x, %#x) %#x, %v\n", p, size, r, err)
		}()
	}
	switch {
	case p == 0:
		return a.UintptrMalloc(size)
	case size == 0 && p != 0:
		return 0, a.UintptrFree(p)
	}

	us := UintptrUsableSize(p)
	if us > size {
		return p, nil
	}

	if r, err = a.UintptrMalloc(size); err != nil {
		return 0, err
	}

	if us < size {
		size = us
	}
	copy((*rawmem)(unsafe.Pointer(r))[:size:size], (*rawmem)(unsafe.Pointer(p))[:size:size])
	return r, a.UintptrFree(p)
}

// UintptrUsableSize is like UsableSize except its argument is an uintptr,
// which must have been returned from UintptrCalloc, UintptrMalloc or
// UintptrRealloc.
func UintptrUsableSize(p uintptr) (r int) {
	if trace {
		defer func() {
			fmt.Fprintf(os.Stderr, "UsableSize(%#x) %#x\n", p, r)
		}()
	}
	if p == 0 {
		return 0
	}

	return usableSize(p)
}

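// usableSize reports the number of usable bytes of the allocation at p: the
// slot size for allocations from a shared page, otherwise the whole dedicated
// page minus its header.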
func usableSize(p uintptr) (r int) {
	pg := p &^ uintptr(pageMask)
	if log := (*page)(unsafe.Pointer(pg)).log; log != 0 {
		return 1 << log
	}

	return (*page)(unsafe.Pointer(pg)).size - int(headerSize)
}

// Calloc is like Malloc except the allocated memory is zeroed.
func (a *Allocator) Calloc(size int) (r []byte, err error) {
	p, err := a.UintptrCalloc(size)
	if err != nil {
		return nil, err
	}

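	// Wrap the allocated block in a []byte: Data points at the block, Len
	// is the requested size and Cap is the block's full usable size.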
	var b []byte
	sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	sh.Cap = usableSize(p)
	sh.Data = p
	sh.Len = size
	return b, nil
}

// Close releases all OS resources used by a and sets it to its zero value.
//
// It's not necessary to Close the Allocator when exiting a process.
func (a *Allocator) Close() (err error) {
	for p := range a.regs {
		if e := a.unmap(p); e != nil && err == nil {
			err = e
		}
	}
	*a = Allocator{}
	return err
}

// Free deallocates memory (as in C.free). The argument of Free must have been
// acquired from Calloc or Malloc or Realloc.
func (a *Allocator) Free(b []byte) (err error) {
	if b = b[:cap(b)]; len(b) == 0 {
		return nil
	}

	return a.UintptrFree(uintptr(unsafe.Pointer(&b[0])))
}

// Malloc allocates size bytes and returns a byte slice of the allocated
// memory. The memory is not initialized. Malloc panics for size < 0 and
// returns (nil, nil) for zero size.
//
// It's ok to reslice the returned slice but the result of appending to it
// cannot be passed to Free or Realloc as it may refer to a different backing
// array afterwards.
func (a *Allocator) Malloc(size int) (r []byte, err error) {
	p, err := a.UintptrMalloc(size)
	if p == 0 || err != nil {
		return nil, err
	}

	sh := (*reflect.SliceHeader)(unsafe.Pointer(&r))
	sh.Cap = usableSize(p)
	sh.Data = p
	sh.Len = size
	return r, nil
}

// Realloc changes the size of the backing array of b to size bytes or returns
// an error, if any. The contents will be unchanged in the range from the
// start of the region up to the minimum of the old and new sizes. If the
// new size is larger than the old size, the added memory will not be
// initialized. If b's backing array is of zero size, then the call is
// equivalent to Malloc(size), for all values of size; if size is equal to
// zero, and b's backing array is not of zero size, then the call is equivalent
// to Free(b). Unless b's backing array is of zero size, it must have been
// returned by an earlier call to Malloc, Calloc or Realloc. If the area
// pointed to was moved, a Free(b) is done.
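//
// For example, given an Allocator a (an illustrative sketch; errors ignored):
//
//	b, _ := a.Malloc(16)    // len(b) == 16
//	b, _ = a.Realloc(b, 32) // len(b) == 32; the first 16 bytes are preserved
//	_ = a.Free(b)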
func (a *Allocator) Realloc(b []byte, size int) (r []byte, err error) {
	var p uintptr
	if b = b[:cap(b)]; len(b) != 0 {
		p = uintptr(unsafe.Pointer(&b[0]))
	}
	if p, err = a.UintptrRealloc(p, size); p == 0 || err != nil {
		return nil, err
	}

	sh := (*reflect.SliceHeader)(unsafe.Pointer(&r))
	sh.Cap = usableSize(p)
	sh.Data = p
	sh.Len = size
	return r, nil
}

// UsableSize reports the size of the memory block allocated at p, which must
// point to the first byte of a slice returned from Calloc, Malloc or Realloc.
// The allocated memory block size can be larger than the size originally
// requested from Calloc, Malloc or Realloc.
func UsableSize(p *byte) (r int) { return UintptrUsableSize(uintptr(unsafe.Pointer(p))) }

// UnsafeCalloc is like Calloc except it returns an unsafe.Pointer.
func (a *Allocator) UnsafeCalloc(size int) (r unsafe.Pointer, err error) {
	p, err := a.UintptrCalloc(size)
	if err != nil {
		return nil, err
	}

	return unsafe.Pointer(p), nil
}

// UnsafeFree is like Free except its argument is an unsafe.Pointer, which must
// have been acquired from UnsafeCalloc or UnsafeMalloc or UnsafeRealloc.
func (a *Allocator) UnsafeFree(p unsafe.Pointer) (err error) { return a.UintptrFree(uintptr(p)) }

// UnsafeMalloc is like Malloc except it returns an unsafe.Pointer.
func (a *Allocator) UnsafeMalloc(size int) (r unsafe.Pointer, err error) {
	p, err := a.UintptrMalloc(size)
	if err != nil {
		return nil, err
	}

	return unsafe.Pointer(p), nil
}

// UnsafeRealloc is like Realloc except its first argument is an
// unsafe.Pointer, which must have been returned from UnsafeCalloc,
// UnsafeMalloc or UnsafeRealloc.
func (a *Allocator) UnsafeRealloc(p unsafe.Pointer, size int) (r unsafe.Pointer, err error) {
	q, err := a.UintptrRealloc(uintptr(p), size)
	if err != nil {
		return nil, err
	}

	return unsafe.Pointer(q), nil
}

// UnsafeUsableSize is like UsableSize except its argument is an
// unsafe.Pointer, which must have been returned from UnsafeCalloc,
// UnsafeMalloc or UnsafeRealloc.
func UnsafeUsableSize(p unsafe.Pointer) (r int) { return UintptrUsableSize(uintptr(p)) }