source: code/trunk/vendor/github.com/klauspost/compress/flate/level1.go@823

Last change on this file since 823 was 822, checked in by yakumo.izuru, 22 months ago

Prefer immortal.run over runit and rc.d, use vendored modules
for convenience.

Signed-off-by: Izuru Yakumo <yakumo.izuru@…>

File size: 4.4 KB
package flate

import "fmt"

// fastGen maintains the table for matches,
// and the previous byte block for level 1.
// This is the generic implementation.
type fastEncL1 struct {
	fastGen
	table [tableSize]tableEntry
}

// Encode produces level 1 (fastest) compression using an algorithm similar to the Snappy encoder.
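// It indexes src with a single hash table and emits literal runs and
// back-reference tokens into dst.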
func (e *fastEncL1) Encode(dst *tokens, src []byte) {
	const (
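		// inputMargin bytes are kept in reserve at the end of src so the
		// 64-bit loads in the search loop never read past the buffer.
		// Blocks shorter than minNonLiteralBlockSize are left for the
		// caller to handle as literals.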
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)
	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}

	// Protect against e.cur wraparound.
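	// Table entries store position + e.cur, so once e.cur grows past
	// bufferReset the table is rebased (or cleared when there is no
	// history) and e.cur restarts at maxMatchOffset.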
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		e.cur = maxMatchOffset
	}

	s := e.addBlock(src)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src
	src = e.hist
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	cv := load3232(src, s)

	for {
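		// skipLog and doEvery control how aggressively the search skips
		// ahead: at least doEvery bytes per probe, plus one extra byte for
		// every 2^skipLog unmatched bytes since the last emit, so
		// incompressible input is scanned quickly.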
		const skipLog = 5
		const doEvery = 2

		nextS := s
		var candidate tableEntry
		for {
			nextHash := hash(cv)
			candidate = e.table[nextHash]
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}

			now := load6432(src, nextS)
			e.table[nextHash] = tableEntry{offset: s + e.cur}
			nextHash = hash(uint32(now))

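			// Table offsets include e.cur; subtracting it converts the
			// candidate back to a position in src. The match is only usable
			// if the distance fits in the deflate window (maxMatchOffset)
			// and the 4 bytes at the candidate position equal cv.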
			offset := s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
				break
			}

			// Do one right away...
			cv = uint32(now)
			s = nextS
			nextS++
			candidate = e.table[nextHash]
			now >>= 8
			e.table[nextHash] = tableEntry{offset: s + e.cur}

			offset = s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
				break
			}
			cv = uint32(now)
			s = nextS
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			t := candidate.offset - e.cur
			l := e.matchlenLong(s+4, t+4, src) + 4

			// Extend backwards
			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
				s--
				t--
				l++
			}
			if nextEmit < s {
				emitLiteral(dst, src[nextEmit:s])
			}

			// Save the match found
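			// (the distance s-t is encoded relative to baseMatchOffset).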
			dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
			s += l
			nextEmit = s
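			// If the probe position nextS already lies beyond the match end,
			// resume just past it.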
			if nextS >= s {
				s = nextS + 1
			}
			if s >= sLimit {
				// Index first pair after match end.
				if int(s+l+4) < len(src) {
					cv := load3232(src, s)
					e.table[hash(cv)] = tableEntry{offset: s + e.cur}
				}
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-2 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-2)
			o := e.cur + s - 2
			prevHash := hash(uint32(x))
			e.table[prevHash] = tableEntry{offset: o}
			x >>= 16
			currHash := hash(uint32(x))
			candidate = e.table[currHash]
			e.table[currHash] = tableEntry{offset: o + 2}

			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
				cv = uint32(x >> 8)
				s++
				break
			}
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
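		// An empty token table is handled by the caller, as with blocks
		// below minNonLiteralBlockSize above.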
		if dst.n == 0 {
			return
		}
		emitLiteral(dst, src[nextEmit:])
	}
}