source: code/trunk/vendor/github.com/andybalholm/brotli/ringbuffer.go@ 145

Last change on this file since 145 was 145, checked in by Izuru Yakumo, 22 months ago

Updated the Makefile and vendored dependencies

Signed-off-by: Izuru Yakumo <yakumo.izuru@…>

File size: 4.4 KB
package brotli

/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* A ringBuffer(window_bits, tail_bits) contains `1 << window_bits' bytes of
   data in a circular manner: writing a byte writes it to:
   `position() % (1 << window_bits)'.
   For convenience, the ringBuffer array contains another copy of the
   first `1 << tail_bits' bytes:
   buffer_[i] == buffer_[i + (1 << window_bits)], if i < (1 << tail_bits),
   and another copy of the last two bytes:
   buffer_[-1] == buffer_[(1 << window_bits) - 1] and
   buffer_[-2] == buffer_[(1 << window_bits) - 2]. */
type ringBuffer struct {
	size_       uint32 // 1 << window_bits: length of the ring buffer proper
	mask_       uint32 // size_ - 1, for masking positions into the buffer
	tail_size_  uint32 // 1 << tail_bits: length of the mirrored tail region
	total_size_ uint32 // size_ + tail_size_
	cur_size_   uint32 // currently allocated buffer length
	pos_        uint32 // write position; bit 31 flags "not the first lap"
	data_       []byte // backing array: two mirror bytes, buffer, then slack
	buffer_     []byte // data_[2:], the ring buffer proper
}
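// Editorial illustration (not part of the vendored file): with
// window_bits = 4 and tail_bits = 2, size_ = 16, mask_ = 15 and
// tail_size_ = 4. Once writes have wrapped past position 15, the tail
// mirror gives buffer_[i] == buffer_[i+16] for i < 4, and data_[0] and
// data_[1] (the "buffer_[-2]" and "buffer_[-1]" of the comment above)
// hold copies of buffer_[14] and buffer_[15].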

func ringBufferInit(rb *ringBuffer) {
	rb.pos_ = 0
}

func ringBufferSetup(params *encoderParams, rb *ringBuffer) {
	var window_bits int = computeRbBits(params)
	var tail_bits int = params.lgblock
	rb.size_ = 1 << uint(window_bits)
	rb.mask_ = (1 << uint(window_bits)) - 1
	rb.tail_size_ = 1 << uint(tail_bits)
	rb.total_size_ = rb.size_ + rb.tail_size_
}
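// Worked example (illustrative values, not from the vendored file): if
// computeRbBits returns 22 and params.lgblock is 16, then
//
//	size_       = 1 << 22            = 4194304
//	mask_       = (1 << 22) - 1      = 4194303
//	tail_size_  = 1 << 16            = 65536
//	total_size_ = size_ + tail_size_ = 4259840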

const kSlackForEightByteHashingEverywhere uint = 7

/* Allocates or re-allocates data_ to the given length plus some slack
   region before and after. Fills the slack regions with zeros. */
func ringBufferInitBuffer(buflen uint32, rb *ringBuffer) {
	var new_data []byte
	var i uint
	size := 2 + int(buflen) + int(kSlackForEightByteHashingEverywhere)
	if cap(rb.data_) < size {
		new_data = make([]byte, size)
	} else {
		new_data = rb.data_[:size]
	}
	if rb.data_ != nil {
		copy(new_data, rb.data_[:2+rb.cur_size_+uint32(kSlackForEightByteHashingEverywhere)])
	}

	rb.data_ = new_data
	rb.cur_size_ = buflen
	rb.buffer_ = rb.data_[2:]
	rb.data_[0] = 0
	rb.data_[1] = 0
	for i = 0; i < kSlackForEightByteHashingEverywhere; i++ {
		rb.buffer_[rb.cur_size_+uint32(i)] = 0
	}
}
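// Layout sketch (editorial note, not part of the vendored file): after
// ringBufferInitBuffer(buflen, rb), the backing array is arranged as
//
//	data_: [2 mirror bytes | buffer_ (buflen bytes) | 7 zeroed slack bytes]
//
// Judging by the constant's name, the slack appears to exist so that hash
// functions reading 8 bytes at a time stay in bounds near the end.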

func ringBufferWriteTail(bytes []byte, n uint, rb *ringBuffer) {
	var masked_pos uint = uint(rb.pos_ & rb.mask_)
	if uint32(masked_pos) < rb.tail_size_ {
		/* Just fill the tail buffer with the beginning data. */
		var p uint = uint(rb.size_ + uint32(masked_pos))
		copy(rb.buffer_[p:], bytes[:brotli_min_size_t(n, uint(rb.tail_size_-uint32(masked_pos)))])
	}
}
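// Worked example (illustrative values, not from the vendored file): with
// size_ = 16 and tail_size_ = 4, a 4-byte write landing at masked_pos = 0
// is mirrored to buffer_[16..19] here, before ringBufferWrite copies the
// same bytes to buffer_[0..3], preserving buffer_[i] == buffer_[i+16] for
// i < 4.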

/* Push bytes into the ring buffer. */
func ringBufferWrite(bytes []byte, n uint, rb *ringBuffer) {
	if rb.pos_ == 0 && uint32(n) < rb.tail_size_ {
		/* Special case for the first write: to process the first block, we don't
		   need to allocate the whole ring buffer and we don't need the tail
		   either. However, we do this memory usage optimization only if the
		   first write is less than the tail size, which is also the input block
		   size; otherwise it is likely that other blocks will follow and we
		   will need to reallocate to the full size anyway. */
		rb.pos_ = uint32(n)
		ringBufferInitBuffer(rb.pos_, rb)
		copy(rb.buffer_, bytes[:n])
		return
	}

	if rb.cur_size_ < rb.total_size_ {
		/* Lazily allocate the full buffer. */
		ringBufferInitBuffer(rb.total_size_, rb)

		/* Initialize the last two bytes to zero, so that we don't have to worry
		   later when we copy the last two bytes to the first two positions. */
		rb.buffer_[rb.size_-2] = 0
		rb.buffer_[rb.size_-1] = 0
	}
	{
		var masked_pos uint = uint(rb.pos_ & rb.mask_)

		/* The length of the writes is limited so that we do not need to worry
		   about a write wrapping around the ring buffer more than once. */
		ringBufferWriteTail(bytes, n, rb)

		if uint32(masked_pos+n) <= rb.size_ {
			/* A single write fits. */
			copy(rb.buffer_[masked_pos:], bytes[:n])
		} else {
			/* Split into two writes.
			   Copy into the end of the buffer, including the tail buffer. */
			copy(rb.buffer_[masked_pos:], bytes[:brotli_min_size_t(n, uint(rb.total_size_-uint32(masked_pos)))])

			/* Copy into the beginning of the buffer. */
			copy(rb.buffer_, bytes[rb.size_-uint32(masked_pos):][:uint32(n)-(rb.size_-uint32(masked_pos))])
		}
	}
	{
		var not_first_lap bool = rb.pos_&(1<<31) != 0
		var rb_pos_mask uint32 = (1 << 31) - 1
		rb.data_[0] = rb.buffer_[rb.size_-2]
		rb.data_[1] = rb.buffer_[rb.size_-1]
		rb.pos_ = (rb.pos_ & rb_pos_mask) + (uint32(n) & rb_pos_mask)
		if not_first_lap {
			/* Wrap, but preserve the not-a-first-lap feature. */
			rb.pos_ |= 1 << 31
		}
	}
}
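
// Usage sketch (hypothetical, not part of the vendored file): a tiny window
// exercising the wrap path. ringBufferSetup would normally derive these
// sizes from encoderParams; they are set directly here for illustration,
// as if window_bits = 4 and tail_bits = 2.
func exampleRingBufferWrap() {
	var rb ringBuffer
	ringBufferInit(&rb)
	rb.size_ = 16       // 1 << 4
	rb.mask_ = 15       // (1 << 4) - 1
	rb.tail_size_ = 4   // 1 << 2
	rb.total_size_ = 20 // size_ + tail_size_

	// Write bytes 0..19 in block-sized chunks of 4; the fifth chunk wraps.
	data := make([]byte, 20)
	for i := range data {
		data[i] = byte(i)
	}
	for i := 0; i < len(data); i += 4 {
		ringBufferWrite(data[i:i+4], 4, &rb)
	}

	// Now buffer_[0..3] hold bytes 16..19, mirrored at buffer_[16..19];
	// data_[0] and data_[1] hold copies of buffer_[14] and buffer_[15]
	// (bytes 14 and 15); and rb.pos_ == 20.
}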