source: code/trunk/vendor/github.com/klauspost/compress/flate/level4.go@ 824

Last change on this file since 824 was 822, checked in by yakumo.izuru, 22 months ago

Prefer immortal.run over runit and rc.d, use vendored modules
for convenience.

Signed-off-by: Izuru Yakumo <yakumo.izuru@…>

package flate

import "fmt"

type fastEncL4 struct {
	fastGen
	table  [tableSize]tableEntry
	bTable [tableSize]tableEntry
}

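// Encode searches src with two hash tables: e.table is indexed by a 4-byte
// hash (hash4x64) and finds short matches, while e.bTable is indexed by a
// 7-byte hash (hash7) and catches longer matches. Both store absolute
// positions (pos + e.cur) as tableEntry offsets.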
func (e *fastEncL4) Encode(dst *tokens, src []byte) {
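	// inputMargin keeps sLimit far enough from the end of src that the
	// unconditional 8-byte loads in the main loop never read past the end;
	// blocks shorter than minNonLiteralBlockSize are handed back to the
	// caller to be emitted as literals.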
	const (
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)
	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}
	// Protect against e.cur wraparound.
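	// Table entries hold absolute positions (pos + e.cur). Once e.cur grows
	// close to bufferReset, every stored offset is rebased so e.cur can be
	// reset to maxMatchOffset without stale entries appearing closer than
	// they really are.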
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			for i := range e.bTable[:] {
				e.bTable[i] = tableEntry{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		for i := range e.bTable[:] {
			v := e.bTable[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.bTable[i].offset = v
		}
		e.cur = maxMatchOffset
	}

	s := e.addBlock(src)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src
	src = e.hist
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	cv := load6432(src, s)
	for {
		const skipLog = 6
		const doEvery = 1

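		// Snappy-style skipping: the step between candidate positions grows
		// by one for every 2^skipLog (64) bytes scanned without emitting a
		// match, so incompressible data is skipped over quickly.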
		nextS := s
		var t int32
		for {
			nextHashS := hash4x64(cv, tableBits)
			nextHashL := hash7(cv, tableBits)

			s = nextS
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}
			// Fetch a short+long candidate
			sCandidate := e.table[nextHashS]
			lCandidate := e.bTable[nextHashL]
			next := load6432(src, nextS)
			entry := tableEntry{offset: s + e.cur}
			e.table[nextHashS] = entry
			e.bTable[nextHashL] = entry

			t = lCandidate.offset - e.cur
			if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) {
				// We got a long match. Use that.
				break
			}

			t = sCandidate.offset - e.cur
			if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
				// Found a 4 match...
				lCandidate = e.bTable[hash7(next, tableBits)]

				// If the next long is a candidate, check if we should use that instead...
				lOff := nextS - (lCandidate.offset - e.cur)
				if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) {
					l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:])
					if l2 > l1 {
						s = nextS
						t = lCandidate.offset - e.cur
					}
				}
				break
			}
			cv = next
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.

		// Extend the 4-byte match as long as possible.
		l := e.matchlenLong(s+4, t+4, src) + 4

		// Extend backwards
		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
			s--
			t--
			l++
		}
		if nextEmit < s {
			emitLiteral(dst, src[nextEmit:s])
		}
		if debugDeflate {
			if t >= s {
				panic("s-t")
			}
			if (s - t) > maxMatchOffset {
				panic(fmt.Sprintln("mmo", t))
			}
			if l < baseMatchLength {
				panic("bml")
			}
		}

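		// Emit the match. The offset is stored relative to baseMatchOffset;
		// in debug builds the checks above assert that the length is at
		// least baseMatchLength and the offset is within maxMatchOffset.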
		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
		s += l
		nextEmit = s
		if nextS >= s {
			s = nextS + 1
		}

		if s >= sLimit {
			// Index first pair after match end.
			if int(s+8) < len(src) {
				cv := load6432(src, s)
				e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur}
				e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur}
			}
			goto emitRemainder
		}

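		// The just-emitted match skipped over positions [nextS, s); storing
		// a long hash at i and i+1 and a short hash at i+1 every third byte
		// keeps both tables populated across that span.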
		// Store every 3rd hash in-between
		if true {
			i := nextS
			if i < s-1 {
				cv := load6432(src, i)
				t := tableEntry{offset: i + e.cur}
				t2 := tableEntry{offset: t.offset + 1}
				e.bTable[hash7(cv, tableBits)] = t
				e.bTable[hash7(cv>>8, tableBits)] = t2
				e.table[hash4u(uint32(cv>>8), tableBits)] = t2

				i += 3
				for ; i < s-1; i += 3 {
					cv := load6432(src, i)
					t := tableEntry{offset: i + e.cur}
					t2 := tableEntry{offset: t.offset + 1}
					e.bTable[hash7(cv, tableBits)] = t
					e.bTable[hash7(cv>>8, tableBits)] = t2
					e.table[hash4u(uint32(cv>>8), tableBits)] = t2
				}
			}
		}

		// We could immediately start working at s now, but to improve
		// compression we first update the hash table at s-1 and at s.
		x := load6432(src, s-1)
		o := e.cur + s - 1
		prevHashS := hash4x64(x, tableBits)
		prevHashL := hash7(x, tableBits)
		e.table[prevHashS] = tableEntry{offset: o}
		e.bTable[prevHashL] = tableEntry{offset: o}
		cv = x >> 8
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}

		emitLiteral(dst, src[nextEmit:])
	}
}
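For context, below is a minimal sketch of how an application would reach this code path through the package's public API. The assumption that compression level 4 selects fastEncL4 internally is inferred from the type name and is not stated in this file.

package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	// Level 4 is assumed to map to fastEncL4 internally.
	w, err := flate.NewWriter(&buf, 4)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write(bytes.Repeat([]byte("hello deflate "), 32)); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("compressed 448 bytes down to %d", buf.Len())
}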