source: code/trunk/vendor/github.com/klauspost/compress/flate/inflate.go@ 145

Last change on this file since 145 was 145, checked in by Izuru Yakumo, 22 months ago

Updated the Makefile and vendored dependencies

Signed-off-by: Izuru Yakumo <yakumo.izuru@…>

File size: 20.6 KB
1// Copyright 2009 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package flate implements the DEFLATE compressed data format, described in
6// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file
7// formats.
8package flate
9
10import (
11 "bufio"
12 "compress/flate"
13 "fmt"
14 "io"
15 "math/bits"
16 "sync"
17)
18
19const (
20 maxCodeLen = 16 // max length of Huffman code
21 maxCodeLenMask = 15 // mask for max length of Huffman code
22 // The next three numbers come from the RFC section 3.2.7, with the
23 // additional proviso in section 3.2.5 which implies that distance codes
24 // 30 and 31 should never occur in compressed data.
25 maxNumLit = 286
26 maxNumDist = 30
27 numCodes = 19 // number of codes in Huffman meta-code
28
29 debugDecode = false
30)
31
32// Value of length - 3 and extra bits.
33type lengthExtra struct {
34 length, extra uint8
35}
36
37var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}}
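
// exampleDecodeLengthCode is an illustrative sketch (not part of the upstream
// file; the function name is invented here) of how decCodeToLen is meant to be
// read: a literal/length symbol in 257..285 is reduced to an index, the base
// match length is the stored length plus 3, and `extra` further bits taken
// from the bit stream are added on top (RFC 1951, section 3.2.5).
func exampleDecodeLengthCode(sym int, extraBits uint32) int {
	e := decCodeToLen[(sym-257)&31]
	// Keep only the extra bits this length code actually uses.
	mask := uint32(1)<<e.extra - 1
	return int(e.length) + 3 + int(extraBits&mask)
}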
38
39var bitMask32 = [32]uint32{
40 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
41 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
42 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
43 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
44} // up to 32 bits
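
// exampleReadBits is an illustrative sketch (not part of the upstream file;
// the function name is invented here) of how bitMask32 is used throughout the
// decoder: bitMask32[n] keeps only the low n bits of the bit accumulator,
// mirroring what readHuffman does with f.b and f.nb.
func exampleReadBits(b uint32, n uint) (value, rest uint32) {
	return b & bitMask32[n&31], b >> (n & 31)
}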
45
46// Initialize the fixedHuffmanDecoder only once upon first use.
47var fixedOnce sync.Once
48var fixedHuffmanDecoder huffmanDecoder
49
50// A CorruptInputError reports the presence of corrupt input at a given offset.
51type CorruptInputError = flate.CorruptInputError
52
53// An InternalError reports an error in the flate code itself.
54type InternalError string
55
56func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
57
58// A ReadError reports an error encountered while reading input.
59//
60// Deprecated: No longer returned.
61type ReadError = flate.ReadError
62
63// A WriteError reports an error encountered while writing output.
64//
65// Deprecated: No longer returned.
66type WriteError = flate.WriteError
67
68// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
69// to switch to a new underlying Reader. This permits reusing a ReadCloser
70// instead of allocating a new one.
71type Resetter interface {
72 // Reset discards any buffered data and resets the Resetter as if it was
73 // newly initialized with the given reader.
74 Reset(r io.Reader, dict []byte) error
75}
76
77// The data structure for decoding Huffman tables is based on that of
78// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits).
79// For codes smaller than the table width, there are multiple entries
80// (each combination of trailing bits has the same value). For codes
81// larger than the table width, the table contains a link to an overflow
82// table. The width of each entry in the link table is the maximum code
83// size minus the chunk width.
84//
85// Note that you can do a lookup in the table even without all bits
86// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
87// have the property that shorter codes come before longer ones, the
88// bit length estimate in the result is a lower bound on the actual
89// number of bits.
90//
91// See the following:
92// http://www.gzip.org/algorithm.txt
93
94// chunk & 15 is number of bits
95// chunk >> 4 is value, including table link
96
97const (
98 huffmanChunkBits = 9
99 huffmanNumChunks = 1 << huffmanChunkBits
100 huffmanCountMask = 15
101 huffmanValueShift = 4
102)
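
// exampleUnpackChunk is an illustrative sketch (not part of the upstream
// file; the function name is invented here) of the chunk encoding noted
// above: the low 4 bits (masked with huffmanCountMask) hold the code length
// in bits, and the remaining bits hold the decoded value or, for codes longer
// than huffmanChunkBits, an index into the overflow links table.
func exampleUnpackChunk(chunk uint16) (codeLen, value uint16) {
	return chunk & huffmanCountMask, chunk >> huffmanValueShift
}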
103
104type huffmanDecoder struct {
105 maxRead int // the maximum number of bits we can read and not overread
106 chunks *[huffmanNumChunks]uint16 // chunks as described above
107 links [][]uint16 // overflow links
108 linkMask uint32 // mask the width of the link table
109}
110
111// Initialize Huffman decoding tables from array of code lengths.
112// Following this function, h is guaranteed to be initialized into a complete
113// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
114// degenerate case where the tree has only a single symbol with length 1. Empty
115// trees are permitted.
116func (h *huffmanDecoder) init(lengths []int) bool {
117 // Sanity enables additional runtime tests during Huffman
118 // table construction. It's intended to be used during
119 // development to supplement the currently ad-hoc unit tests.
120 const sanity = false
121
122 if h.chunks == nil {
123 h.chunks = &[huffmanNumChunks]uint16{}
124 }
125 if h.maxRead != 0 {
126 *h = huffmanDecoder{chunks: h.chunks, links: h.links}
127 }
128
129 // Count number of codes of each length,
130 // compute maxRead and max length.
131 var count [maxCodeLen]int
132 var min, max int
133 for _, n := range lengths {
134 if n == 0 {
135 continue
136 }
137 if min == 0 || n < min {
138 min = n
139 }
140 if n > max {
141 max = n
142 }
143 count[n&maxCodeLenMask]++
144 }
145
146 // Empty tree. The decompressor.huffSym function will fail later if the tree
147 // is used. Technically, an empty tree is only valid for the HDIST tree and
148 // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
149 // is guaranteed to fail since it will attempt to use the tree to decode the
150 // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
151 // guaranteed to fail later since the compressed data section must be
152 // composed of at least one symbol (the end-of-block marker).
153 if max == 0 {
154 return true
155 }
156
157 code := 0
158 var nextcode [maxCodeLen]int
159 for i := min; i <= max; i++ {
160 code <<= 1
161 nextcode[i&maxCodeLenMask] = code
162 code += count[i&maxCodeLenMask]
163 }
164
165 // Check that the coding is complete (i.e., that we've
166 // assigned all 2-to-the-max possible bit sequences).
167 // Exception: To be compatible with zlib, we also need to
168 // accept degenerate single-code codings. See also
169 // TestDegenerateHuffmanCoding.
170 if code != 1<<uint(max) && !(code == 1 && max == 1) {
171 if debugDecode {
172 fmt.Println("coding failed, code, max:", code, max, code == 1<<uint(max), code == 1 && max == 1, "(one should be true)")
173 }
174 return false
175 }
176
177 h.maxRead = min
178 chunks := h.chunks[:]
179 for i := range chunks {
180 chunks[i] = 0
181 }
182
183 if max > huffmanChunkBits {
184 numLinks := 1 << (uint(max) - huffmanChunkBits)
185 h.linkMask = uint32(numLinks - 1)
186
187 // create link tables
188 link := nextcode[huffmanChunkBits+1] >> 1
189 if cap(h.links) < huffmanNumChunks-link {
190 h.links = make([][]uint16, huffmanNumChunks-link)
191 } else {
192 h.links = h.links[:huffmanNumChunks-link]
193 }
194 for j := uint(link); j < huffmanNumChunks; j++ {
195 reverse := int(bits.Reverse16(uint16(j)))
196 reverse >>= uint(16 - huffmanChunkBits)
197 off := j - uint(link)
198 if sanity && h.chunks[reverse] != 0 {
199 panic("impossible: overwriting existing chunk")
200 }
201 h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
202 if cap(h.links[off]) < numLinks {
203 h.links[off] = make([]uint16, numLinks)
204 } else {
205 links := h.links[off][:0]
206 h.links[off] = links[:numLinks]
207 }
208 }
209 } else {
210 h.links = h.links[:0]
211 }
212
213 for i, n := range lengths {
214 if n == 0 {
215 continue
216 }
217 code := nextcode[n]
218 nextcode[n]++
219 chunk := uint16(i<<huffmanValueShift | n)
220 reverse := int(bits.Reverse16(uint16(code)))
221 reverse >>= uint(16 - n)
222 if n <= huffmanChunkBits {
223 for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
224 // We should never need to overwrite
225 // an existing chunk. Also, 0 is
226 // never a valid chunk, because the
227 // lower 4 "count" bits should be
228 // between 1 and 15.
229 if sanity && h.chunks[off] != 0 {
230 panic("impossible: overwriting existing chunk")
231 }
232 h.chunks[off] = chunk
233 }
234 } else {
235 j := reverse & (huffmanNumChunks - 1)
236 if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
237 // Longer codes should have been
238 // associated with a link table above.
239 panic("impossible: not an indirect chunk")
240 }
241 value := h.chunks[j] >> huffmanValueShift
242 linktab := h.links[value]
243 reverse >>= huffmanChunkBits
244 for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
245 if sanity && linktab[off] != 0 {
246 panic("impossible: overwriting existing chunk")
247 }
248 linktab[off] = chunk
249 }
250 }
251 }
252
253 if sanity {
254 // Above we've sanity checked that we never overwrote
255 // an existing entry. Here we additionally check that
256 // we filled the tables completely.
257 for i, chunk := range h.chunks {
258 if chunk == 0 {
259 // As an exception, in the degenerate
260 // single-code case, we allow odd
261 // chunks to be missing.
262 if code == 1 && i%2 == 1 {
263 continue
264 }
265 panic("impossible: missing chunk")
266 }
267 }
268 for _, linktab := range h.links {
269 for _, chunk := range linktab {
270 if chunk == 0 {
271 panic("impossible: missing chunk")
272 }
273 }
274 }
275 }
276
277 return true
278}
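
// exampleInitDecoder is an illustrative sketch (not part of the upstream
// file; the function name is invented here) showing how init builds a decoder
// from a slice of code lengths. The lengths 2, 1, 3, 3 describe the complete
// prefix code used as the example in RFC 1951 section 3.2.2; init reports
// false if the lengths do not form a complete code.
func exampleInitDecoder() bool {
	var h huffmanDecoder
	return h.init([]int{2, 1, 3, 3})
}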
279
280// The actual read interface needed by NewReader.
281// If the passed in io.Reader does not also have ReadByte,
282// the NewReader will introduce its own buffering.
283type Reader interface {
284 io.Reader
285 io.ByteReader
286}
287
288// Decompress state.
289type decompressor struct {
290 // Input source.
291 r Reader
292 roffset int64
293
294 // Huffman decoders for literal/length, distance.
295 h1, h2 huffmanDecoder
296
297 // Length arrays used to define Huffman codes.
298 bits *[maxNumLit + maxNumDist]int
299 codebits *[numCodes]int
300
301 // Output history, buffer.
302 dict dictDecoder
303
304 // Next step in the decompression,
305 // and decompression state.
306 step func(*decompressor)
307 stepState int
308 err error
309 toRead []byte
310 hl, hd *huffmanDecoder
311 copyLen int
312 copyDist int
313
314 // Temporary buffer (avoids repeated allocation).
315 buf [4]byte
316
317 // Input bits, in top of b.
318 b uint32
319
320 nb uint
321 final bool
322}
323
324func (f *decompressor) nextBlock() {
325 for f.nb < 1+2 {
326 if f.err = f.moreBits(); f.err != nil {
327 return
328 }
329 }
330 f.final = f.b&1 == 1
331 f.b >>= 1
332 typ := f.b & 3
333 f.b >>= 2
334 f.nb -= 1 + 2
335 switch typ {
336 case 0:
337 f.dataBlock()
338 if debugDecode {
339 fmt.Println("stored block")
340 }
341 case 1:
342 // compressed, fixed Huffman tables
343 f.hl = &fixedHuffmanDecoder
344 f.hd = nil
345 f.huffmanBlockDecoder()()
346 if debugDecode {
347 fmt.Println("predefined huffman block")
348 }
349 case 2:
350 // compressed, dynamic Huffman tables
351 if f.err = f.readHuffman(); f.err != nil {
352 break
353 }
354 f.hl = &f.h1
355 f.hd = &f.h2
356 f.huffmanBlockDecoder()()
357 if debugDecode {
358 fmt.Println("dynamic huffman block")
359 }
360 default:
361 // 3 is reserved.
362 if debugDecode {
363 fmt.Println("reserved data block encountered")
364 }
365 f.err = CorruptInputError(f.roffset)
366 }
367}
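
// exampleBlockHeader is an illustrative sketch (not part of the upstream
// file; the function name is invented here) of the 3-bit block header
// consumed by nextBlock: bit 0 is BFINAL and bits 1-2 are BTYPE
// (0 = stored, 1 = fixed Huffman, 2 = dynamic Huffman, 3 = reserved/invalid).
func exampleBlockHeader(header uint32) (final bool, blockType uint32) {
	return header&1 == 1, (header >> 1) & 3
}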
368
369func (f *decompressor) Read(b []byte) (int, error) {
370 for {
371 if len(f.toRead) > 0 {
372 n := copy(b, f.toRead)
373 f.toRead = f.toRead[n:]
374 if len(f.toRead) == 0 {
375 return n, f.err
376 }
377 return n, nil
378 }
379 if f.err != nil {
380 return 0, f.err
381 }
382 f.step(f)
383 if f.err != nil && len(f.toRead) == 0 {
384 f.toRead = f.dict.readFlush() // Flush what's left in case of error
385 }
386 }
387}
388
389// Support the io.WriteTo interface for io.Copy and friends.
390func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
391 total := int64(0)
392 flushed := false
393 for {
394 if len(f.toRead) > 0 {
395 n, err := w.Write(f.toRead)
396 total += int64(n)
397 if err != nil {
398 f.err = err
399 return total, err
400 }
401 if n != len(f.toRead) {
402 return total, io.ErrShortWrite
403 }
404 f.toRead = f.toRead[:0]
405 }
406 if f.err != nil && flushed {
407 if f.err == io.EOF {
408 return total, nil
409 }
410 return total, f.err
411 }
412 if f.err == nil {
413 f.step(f)
414 }
415 if len(f.toRead) == 0 && f.err != nil && !flushed {
416 f.toRead = f.dict.readFlush() // Flush what's left in case of error
417 flushed = true
418 }
419 }
420}
421
422func (f *decompressor) Close() error {
423 if f.err == io.EOF {
424 return nil
425 }
426 return f.err
427}
428
429// RFC 1951 section 3.2.7.
430// Compression with dynamic Huffman codes
431
432var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
433
434func (f *decompressor) readHuffman() error {
435 // HLIT[5], HDIST[5], HCLEN[4].
436 for f.nb < 5+5+4 {
437 if err := f.moreBits(); err != nil {
438 return err
439 }
440 }
441 nlit := int(f.b&0x1F) + 257
442 if nlit > maxNumLit {
443 if debugDecode {
444 fmt.Println("nlit > maxNumLit", nlit)
445 }
446 return CorruptInputError(f.roffset)
447 }
448 f.b >>= 5
449 ndist := int(f.b&0x1F) + 1
450 if ndist > maxNumDist {
451 if debugDecode {
452 fmt.Println("ndist > maxNumDist", ndist)
453 }
454 return CorruptInputError(f.roffset)
455 }
456 f.b >>= 5
457 nclen := int(f.b&0xF) + 4
458 // numCodes is 19, so nclen is always valid.
459 f.b >>= 4
460 f.nb -= 5 + 5 + 4
461
462 // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
463 for i := 0; i < nclen; i++ {
464 for f.nb < 3 {
465 if err := f.moreBits(); err != nil {
466 return err
467 }
468 }
469 f.codebits[codeOrder[i]] = int(f.b & 0x7)
470 f.b >>= 3
471 f.nb -= 3
472 }
473 for i := nclen; i < len(codeOrder); i++ {
474 f.codebits[codeOrder[i]] = 0
475 }
476 if !f.h1.init(f.codebits[0:]) {
477 if debugDecode {
478 fmt.Println("init codebits failed")
479 }
480 return CorruptInputError(f.roffset)
481 }
482
483 // HLIT + 257 code lengths, HDIST + 1 code lengths,
484 // using the code length Huffman code.
485 for i, n := 0, nlit+ndist; i < n; {
486 x, err := f.huffSym(&f.h1)
487 if err != nil {
488 return err
489 }
490 if x < 16 {
491 // Actual length.
492 f.bits[i] = x
493 i++
494 continue
495 }
496 // Repeat previous length or zero.
497 var rep int
498 var nb uint
499 var b int
500 switch x {
501 default:
502 return InternalError("unexpected length code")
503 case 16:
504 rep = 3
505 nb = 2
506 if i == 0 {
507 if debugDecode {
508 fmt.Println("i==0")
509 }
510 return CorruptInputError(f.roffset)
511 }
512 b = f.bits[i-1]
513 case 17:
514 rep = 3
515 nb = 3
516 b = 0
517 case 18:
518 rep = 11
519 nb = 7
520 b = 0
521 }
522 for f.nb < nb {
523 if err := f.moreBits(); err != nil {
524 if debugDecode {
525 fmt.Println("morebits:", err)
526 }
527 return err
528 }
529 }
530 rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1))
531 f.b >>= nb & regSizeMaskUint32
532 f.nb -= nb
533 if i+rep > n {
534 if debugDecode {
535 fmt.Println("i+rep > n", i, rep, n)
536 }
537 return CorruptInputError(f.roffset)
538 }
539 for j := 0; j < rep; j++ {
540 f.bits[i] = b
541 i++
542 }
543 }
544
545 if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
546 if debugDecode {
547 fmt.Println("init2 failed")
548 }
549 return CorruptInputError(f.roffset)
550 }
551
552 // As an optimization, we can initialize the maxRead bits to read at a time
553 // for the HLIT tree to the length of the EOB marker since we know that
554 // every block must terminate with one. This preserves the property that
555 // we never read any extra bytes after the end of the DEFLATE stream.
556 if f.h1.maxRead < f.bits[endBlockMarker] {
557 f.h1.maxRead = f.bits[endBlockMarker]
558 }
559 if !f.final {
560 // If not the final block, the smallest block possible is
561 // a predefined table, BTYPE=01, with a single EOB marker.
562 // This will take up 3 + 7 bits.
563 f.h1.maxRead += 10
564 }
565
566 return nil
567}
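
// exampleRepeatCode is an illustrative sketch (not part of the upstream file;
// the function name is invented here) of the code-length repeat symbols
// handled in readHuffman above: symbol 16 repeats the previous length 3-6
// times (2 extra bits), 17 emits 3-10 zero lengths (3 extra bits), and 18
// emits 11-138 zero lengths (7 extra bits).
func exampleRepeatCode(sym int, extra uint32, prevLen int) (repeat, length int) {
	switch sym {
	case 16:
		return 3 + int(extra&3), prevLen
	case 17:
		return 3 + int(extra&7), 0
	case 18:
		return 11 + int(extra&127), 0
	}
	return 0, 0
}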
568
569// Copy a single uncompressed data block from input to output.
570func (f *decompressor) dataBlock() {
571 // Uncompressed.
572 // Discard current half-byte.
573 left := (f.nb) & 7
574 f.nb -= left
575 f.b >>= left
576
577 offBytes := f.nb >> 3
578 // Unfilled values will be overwritten.
579 f.buf[0] = uint8(f.b)
580 f.buf[1] = uint8(f.b >> 8)
581 f.buf[2] = uint8(f.b >> 16)
582 f.buf[3] = uint8(f.b >> 24)
583
584 f.roffset += int64(offBytes)
585 f.nb, f.b = 0, 0
586
587 // Length then ones-complement of length.
588 nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
589 f.roffset += int64(nr)
590 if err != nil {
591 f.err = noEOF(err)
592 return
593 }
594 n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
595 nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
596 if nn != ^n {
597 if debugDecode {
598 ncomp := ^n
599 fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
600 }
601 f.err = CorruptInputError(f.roffset)
602 return
603 }
604
605 if n == 0 {
606 f.toRead = f.dict.readFlush()
607 f.finishBlock()
608 return
609 }
610
611 f.copyLen = int(n)
612 f.copyData()
613}
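
// exampleStoredBlockHeader is an illustrative sketch (not part of the
// upstream file; the function name is invented here) of the stored-block
// length check performed in dataBlock: after byte alignment, a stored block
// starts with a 16-bit little-endian LEN followed by NLEN, its ones'
// complement, and the two must agree.
func exampleStoredBlockHeader(hdr [4]byte) (length uint16, ok bool) {
	n := uint16(hdr[0]) | uint16(hdr[1])<<8
	nn := uint16(hdr[2]) | uint16(hdr[3])<<8
	return n, nn == ^n
}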
614
615// copyData copies f.copyLen bytes from the underlying reader into f.hist.
616// It pauses for reads when f.hist is full.
617func (f *decompressor) copyData() {
618 buf := f.dict.writeSlice()
619 if len(buf) > f.copyLen {
620 buf = buf[:f.copyLen]
621 }
622
623 cnt, err := io.ReadFull(f.r, buf)
624 f.roffset += int64(cnt)
625 f.copyLen -= cnt
626 f.dict.writeMark(cnt)
627 if err != nil {
628 f.err = noEOF(err)
629 return
630 }
631
632 if f.dict.availWrite() == 0 || f.copyLen > 0 {
633 f.toRead = f.dict.readFlush()
634 f.step = (*decompressor).copyData
635 return
636 }
637 f.finishBlock()
638}
639
640func (f *decompressor) finishBlock() {
641 if f.final {
642 if f.dict.availRead() > 0 {
643 f.toRead = f.dict.readFlush()
644 }
645 f.err = io.EOF
646 }
647 f.step = (*decompressor).nextBlock
648}
649
650// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
651func noEOF(e error) error {
652 if e == io.EOF {
653 return io.ErrUnexpectedEOF
654 }
655 return e
656}
657
658func (f *decompressor) moreBits() error {
659 c, err := f.r.ReadByte()
660 if err != nil {
661 return noEOF(err)
662 }
663 f.roffset++
664 f.b |= uint32(c) << (f.nb & regSizeMaskUint32)
665 f.nb += 8
666 return nil
667}
668
669// Read the next Huffman-encoded symbol from f according to h.
670func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
671 // Since a huffmanDecoder can be empty or be composed of a degenerate tree
672 // with a single element, huffSym must error on these two edge cases. In both
673 // cases, the chunks slice will be 0 for the invalid sequence, leading it to
674 // satisfy the n == 0 check below.
675 n := uint(h.maxRead)
676 // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
677 // but is smart enough to keep local variables in registers, so use nb and b,
678 // inline call to moreBits and reassign b,nb back to f on return.
679 nb, b := f.nb, f.b
680 for {
681 for nb < n {
682 c, err := f.r.ReadByte()
683 if err != nil {
684 f.b = b
685 f.nb = nb
686 return 0, noEOF(err)
687 }
688 f.roffset++
689 b |= uint32(c) << (nb & regSizeMaskUint32)
690 nb += 8
691 }
692 chunk := h.chunks[b&(huffmanNumChunks-1)]
693 n = uint(chunk & huffmanCountMask)
694 if n > huffmanChunkBits {
695 chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
696 n = uint(chunk & huffmanCountMask)
697 }
698 if n <= nb {
699 if n == 0 {
700 f.b = b
701 f.nb = nb
702 if debugDecode {
703 fmt.Println("huffsym: n==0")
704 }
705 f.err = CorruptInputError(f.roffset)
706 return 0, f.err
707 }
708 f.b = b >> (n & regSizeMaskUint32)
709 f.nb = nb - n
710 return int(chunk >> huffmanValueShift), nil
711 }
712 }
713}
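
// exampleTableLookup is an illustrative sketch (not part of the upstream
// file; the function name is invented here) of the two-level lookup performed
// in huffSym: the low huffmanChunkBits bits of the bit-reversed code index the
// primary chunks table; if the stored bit length exceeds huffmanChunkBits, the
// value field selects a link table indexed by the remaining bits.
func exampleTableLookup(h *huffmanDecoder, b uint32) (symbol int, bitLen uint) {
	chunk := h.chunks[b&(huffmanNumChunks-1)]
	n := uint(chunk & huffmanCountMask)
	if n > huffmanChunkBits {
		chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
		n = uint(chunk & huffmanCountMask)
	}
	return int(chunk >> huffmanValueShift), n
}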
714
715func makeReader(r io.Reader) Reader {
716 if rr, ok := r.(Reader); ok {
717 return rr
718 }
719 return bufio.NewReader(r)
720}
721
722func fixedHuffmanDecoderInit() {
723 fixedOnce.Do(func() {
724 // These come from the RFC section 3.2.6.
725 var bits [288]int
726 for i := 0; i < 144; i++ {
727 bits[i] = 8
728 }
729 for i := 144; i < 256; i++ {
730 bits[i] = 9
731 }
732 for i := 256; i < 280; i++ {
733 bits[i] = 7
734 }
735 for i := 280; i < 288; i++ {
736 bits[i] = 8
737 }
738 fixedHuffmanDecoder.init(bits[:])
739 })
740}
741
742func (f *decompressor) Reset(r io.Reader, dict []byte) error {
743 *f = decompressor{
744 r: makeReader(r),
745 bits: f.bits,
746 codebits: f.codebits,
747 h1: f.h1,
748 h2: f.h2,
749 dict: f.dict,
750 step: (*decompressor).nextBlock,
751 }
752 f.dict.init(maxMatchOffset, dict)
753 return nil
754}
755
756// NewReader returns a new ReadCloser that can be used
757// to read the uncompressed version of r.
758// If r does not also implement io.ByteReader,
759// the decompressor may read more data than necessary from r.
760// It is the caller's responsibility to call Close on the ReadCloser
761// when finished reading.
762//
763// The ReadCloser returned by NewReader also implements Resetter.
764func NewReader(r io.Reader) io.ReadCloser {
765 fixedHuffmanDecoderInit()
766
767 var f decompressor
768 f.r = makeReader(r)
769 f.bits = new([maxNumLit + maxNumDist]int)
770 f.codebits = new([numCodes]int)
771 f.step = (*decompressor).nextBlock
772 f.dict.init(maxMatchOffset, nil)
773 return &f
774}
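
// exampleInflate is an illustrative usage sketch (not part of the upstream
// file; the function name is invented here): it decompresses a raw DEFLATE
// stream from r into w using NewReader and closes the reader when done.
func exampleInflate(w io.Writer, r io.Reader) error {
	zr := NewReader(r)
	defer zr.Close()
	_, err := io.Copy(w, zr)
	return err
}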
775
776// NewReaderDict is like NewReader but initializes the reader
777// with a preset dictionary. The returned Reader behaves as if
778// the uncompressed data stream started with the given dictionary,
779// which has already been read. NewReaderDict is typically used
780// to read data compressed by NewWriterDict.
781//
782// The ReadCloser returned by NewReaderDict also implements Resetter.
783func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
784 fixedHuffmanDecoderInit()
785
786 var f decompressor
787 f.r = makeReader(r)
788 f.bits = new([maxNumLit + maxNumDist]int)
789 f.codebits = new([numCodes]int)
790 f.step = (*decompressor).nextBlock
791 f.dict.init(maxMatchOffset, dict)
792 return &f
793}
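
// exampleInflateWithDict is an illustrative usage sketch (not part of the
// upstream file; the function name is invented here): data produced with a
// preset dictionary must be decompressed through NewReaderDict with the same
// dictionary bytes.
func exampleInflateWithDict(w io.Writer, r io.Reader, dict []byte) error {
	zr := NewReaderDict(r, dict)
	defer zr.Close()
	_, err := io.Copy(w, zr)
	return err
}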