source: code/trunk/vendor/github.com/valyala/fasthttp/compress.go@ 145

Last change on this file since 145 was 145, checked in by Izuru Yakumo, 22 months ago

Updated the Makefile and vendored dependencies

Signed-off-by: Izuru Yakumo <yakumo.izuru@…>

File size: 11.1 KB
RevLine 
[145]1package fasthttp
2
3import (
4 "bytes"
5 "fmt"
6 "io"
7 "os"
8 "sync"
9
10 "github.com/klauspost/compress/flate"
11 "github.com/klauspost/compress/gzip"
12 "github.com/klauspost/compress/zlib"
13 "github.com/valyala/bytebufferpool"
14 "github.com/valyala/fasthttp/stackless"
15)
16
17// Supported compression levels.
18const (
19 CompressNoCompression = flate.NoCompression
20 CompressBestSpeed = flate.BestSpeed
21 CompressBestCompression = flate.BestCompression
22 CompressDefaultCompression = 6 // flate.DefaultCompression
23 CompressHuffmanOnly = -2 // flate.HuffmanOnly
24)
25
26func acquireGzipReader(r io.Reader) (*gzip.Reader, error) {
27 v := gzipReaderPool.Get()
28 if v == nil {
29 return gzip.NewReader(r)
30 }
31 zr := v.(*gzip.Reader)
32 if err := zr.Reset(r); err != nil {
33 return nil, err
34 }
35 return zr, nil
36}
37
38func releaseGzipReader(zr *gzip.Reader) {
39 zr.Close()
40 gzipReaderPool.Put(zr)
41}
42
43var gzipReaderPool sync.Pool
44
45func acquireFlateReader(r io.Reader) (io.ReadCloser, error) {
46 v := flateReaderPool.Get()
47 if v == nil {
48 zr, err := zlib.NewReader(r)
49 if err != nil {
50 return nil, err
51 }
52 return zr, nil
53 }
54 zr := v.(io.ReadCloser)
55 if err := resetFlateReader(zr, r); err != nil {
56 return nil, err
57 }
58 return zr, nil
59}
60
61func releaseFlateReader(zr io.ReadCloser) {
62 zr.Close()
63 flateReaderPool.Put(zr)
64}
65
66func resetFlateReader(zr io.ReadCloser, r io.Reader) error {
67 zrr, ok := zr.(zlib.Resetter)
68 if !ok {
69 panic("BUG: zlib.Reader doesn't implement zlib.Resetter???")
70 }
71 return zrr.Reset(r, nil)
72}
73
74var flateReaderPool sync.Pool
75
76func acquireStacklessGzipWriter(w io.Writer, level int) stackless.Writer {
77 nLevel := normalizeCompressLevel(level)
78 p := stacklessGzipWriterPoolMap[nLevel]
79 v := p.Get()
80 if v == nil {
81 return stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
82 return acquireRealGzipWriter(w, level)
83 })
84 }
85 sw := v.(stackless.Writer)
86 sw.Reset(w)
87 return sw
88}
89
90func releaseStacklessGzipWriter(sw stackless.Writer, level int) {
91 sw.Close()
92 nLevel := normalizeCompressLevel(level)
93 p := stacklessGzipWriterPoolMap[nLevel]
94 p.Put(sw)
95}
96
97func acquireRealGzipWriter(w io.Writer, level int) *gzip.Writer {
98 nLevel := normalizeCompressLevel(level)
99 p := realGzipWriterPoolMap[nLevel]
100 v := p.Get()
101 if v == nil {
102 zw, err := gzip.NewWriterLevel(w, level)
103 if err != nil {
104 panic(fmt.Sprintf("BUG: unexpected error from gzip.NewWriterLevel(%d): %s", level, err))
105 }
106 return zw
107 }
108 zw := v.(*gzip.Writer)
109 zw.Reset(w)
110 return zw
111}
112
113func releaseRealGzipWriter(zw *gzip.Writer, level int) {
114 zw.Close()
115 nLevel := normalizeCompressLevel(level)
116 p := realGzipWriterPoolMap[nLevel]
117 p.Put(zw)
118}
119
120var (
121 stacklessGzipWriterPoolMap = newCompressWriterPoolMap()
122 realGzipWriterPoolMap = newCompressWriterPoolMap()
123)
124
125// AppendGzipBytesLevel appends gzipped src to dst using the given
126// compression level and returns the resulting dst.
127//
128// Supported compression levels are:
129//
130// * CompressNoCompression
131// * CompressBestSpeed
132// * CompressBestCompression
133// * CompressDefaultCompression
134// * CompressHuffmanOnly
135func AppendGzipBytesLevel(dst, src []byte, level int) []byte {
136 w := &byteSliceWriter{dst}
137 WriteGzipLevel(w, src, level) //nolint:errcheck
138 return w.b
139}
140
141// WriteGzipLevel writes gzipped p to w using the given compression level
142// and returns the number of compressed bytes written to w.
143//
144// Supported compression levels are:
145//
146// * CompressNoCompression
147// * CompressBestSpeed
148// * CompressBestCompression
149// * CompressDefaultCompression
150// * CompressHuffmanOnly
151func WriteGzipLevel(w io.Writer, p []byte, level int) (int, error) {
152 switch w.(type) {
153 case *byteSliceWriter,
154 *bytes.Buffer,
155 *bytebufferpool.ByteBuffer:
156 // These writers don't block, so we can just use stacklessWriteGzip
157 ctx := &compressCtx{
158 w: w,
159 p: p,
160 level: level,
161 }
162 stacklessWriteGzip(ctx)
163 return len(p), nil
164 default:
165 zw := acquireStacklessGzipWriter(w, level)
166 n, err := zw.Write(p)
167 releaseStacklessGzipWriter(zw, level)
168 return n, err
169 }
170}
171
172var stacklessWriteGzip = stackless.NewFunc(nonblockingWriteGzip)
173
174func nonblockingWriteGzip(ctxv interface{}) {
175 ctx := ctxv.(*compressCtx)
176 zw := acquireRealGzipWriter(ctx.w, ctx.level)
177
178 _, err := zw.Write(ctx.p)
179 if err != nil {
180 panic(fmt.Sprintf("BUG: gzip.Writer.Write for len(p)=%d returned unexpected error: %s", len(ctx.p), err))
181 }
182
183 releaseRealGzipWriter(zw, ctx.level)
184}
185
186// WriteGzip writes gzipped p to w and returns the number of compressed
187// bytes written to w.
188func WriteGzip(w io.Writer, p []byte) (int, error) {
189 return WriteGzipLevel(w, p, CompressDefaultCompression)
190}
191
192// AppendGzipBytes appends gzipped src to dst and returns the resulting dst.
193func AppendGzipBytes(dst, src []byte) []byte {
194 return AppendGzipBytesLevel(dst, src, CompressDefaultCompression)
195}
196
197// WriteGunzip writes ungzipped p to w and returns the number of uncompressed
198// bytes written to w.
199func WriteGunzip(w io.Writer, p []byte) (int, error) {
200 r := &byteSliceReader{p}
201 zr, err := acquireGzipReader(r)
202 if err != nil {
203 return 0, err
204 }
205 n, err := copyZeroAlloc(w, zr)
206 releaseGzipReader(zr)
207 nn := int(n)
208 if int64(nn) != n {
209 return 0, fmt.Errorf("too much data gunzipped: %d", n)
210 }
211 return nn, err
212}
213
214// AppendGunzipBytes appends gunzipped src to dst and returns the resulting dst.
215func AppendGunzipBytes(dst, src []byte) ([]byte, error) {
216 w := &byteSliceWriter{dst}
217 _, err := WriteGunzip(w, src)
218 return w.b, err
219}
220
221// AppendDeflateBytesLevel appends deflated src to dst using the given
222// compression level and returns the resulting dst.
223//
224// Supported compression levels are:
225//
226// * CompressNoCompression
227// * CompressBestSpeed
228// * CompressBestCompression
229// * CompressDefaultCompression
230// * CompressHuffmanOnly
231func AppendDeflateBytesLevel(dst, src []byte, level int) []byte {
232 w := &byteSliceWriter{dst}
233 WriteDeflateLevel(w, src, level) //nolint:errcheck
234 return w.b
235}
236
237// WriteDeflateLevel writes deflated p to w using the given compression level
238// and returns the number of compressed bytes written to w.
239//
240// Supported compression levels are:
241//
242// * CompressNoCompression
243// * CompressBestSpeed
244// * CompressBestCompression
245// * CompressDefaultCompression
246// * CompressHuffmanOnly
247func WriteDeflateLevel(w io.Writer, p []byte, level int) (int, error) {
248 switch w.(type) {
249 case *byteSliceWriter,
250 *bytes.Buffer,
251 *bytebufferpool.ByteBuffer:
252 // These writers don't block, so we can just use stacklessWriteDeflate
253 ctx := &compressCtx{
254 w: w,
255 p: p,
256 level: level,
257 }
258 stacklessWriteDeflate(ctx)
259 return len(p), nil
260 default:
261 zw := acquireStacklessDeflateWriter(w, level)
262 n, err := zw.Write(p)
263 releaseStacklessDeflateWriter(zw, level)
264 return n, err
265 }
266}
267
268var stacklessWriteDeflate = stackless.NewFunc(nonblockingWriteDeflate)
269
270func nonblockingWriteDeflate(ctxv interface{}) {
271 ctx := ctxv.(*compressCtx)
272 zw := acquireRealDeflateWriter(ctx.w, ctx.level)
273
274 _, err := zw.Write(ctx.p)
275 if err != nil {
276 panic(fmt.Sprintf("BUG: zlib.Writer.Write for len(p)=%d returned unexpected error: %s", len(ctx.p), err))
277 }
278
279 releaseRealDeflateWriter(zw, ctx.level)
280}
281
282type compressCtx struct {
283 w io.Writer
284 p []byte
285 level int
286}
287
288// WriteDeflate writes deflated p to w and returns the number of compressed
289// bytes written to w.
290func WriteDeflate(w io.Writer, p []byte) (int, error) {
291 return WriteDeflateLevel(w, p, CompressDefaultCompression)
292}
293
294// AppendDeflateBytes appends deflated src to dst and returns the resulting dst.
295func AppendDeflateBytes(dst, src []byte) []byte {
296 return AppendDeflateBytesLevel(dst, src, CompressDefaultCompression)
297}
298
299// WriteInflate writes inflated p to w and returns the number of uncompressed
300// bytes written to w.
301func WriteInflate(w io.Writer, p []byte) (int, error) {
302 r := &byteSliceReader{p}
303 zr, err := acquireFlateReader(r)
304 if err != nil {
305 return 0, err
306 }
307 n, err := copyZeroAlloc(w, zr)
308 releaseFlateReader(zr)
309 nn := int(n)
310 if int64(nn) != n {
311 return 0, fmt.Errorf("too much data inflated: %d", n)
312 }
313 return nn, err
314}
315
316// AppendInflateBytes appends inflated src to dst and returns the resulting dst.
317func AppendInflateBytes(dst, src []byte) ([]byte, error) {
318 w := &byteSliceWriter{dst}
319 _, err := WriteInflate(w, src)
320 return w.b, err
321}
322
// byteSliceWriter is an io.Writer that accumulates everything written to
// it in an in-memory byte slice.
type byteSliceWriter struct {
	b []byte
}

// Write appends p to the underlying slice. It never returns an error.
func (w *byteSliceWriter) Write(p []byte) (int, error) {
	w.b = append(w.b, p...)
	return len(p), nil
}
331
332type byteSliceReader struct {
333 b []byte
334}
335
336func (r *byteSliceReader) Read(p []byte) (int, error) {
337 if len(r.b) == 0 {
338 return 0, io.EOF
339 }
340 n := copy(p, r.b)
341 r.b = r.b[n:]
342 return n, nil
343}
344
345func (r *byteSliceReader) ReadByte() (byte, error) {
346 if len(r.b) == 0 {
347 return 0, io.EOF
348 }
349 n := r.b[0]
350 r.b = r.b[1:]
351 return n, nil
352}
353
354func acquireStacklessDeflateWriter(w io.Writer, level int) stackless.Writer {
355 nLevel := normalizeCompressLevel(level)
356 p := stacklessDeflateWriterPoolMap[nLevel]
357 v := p.Get()
358 if v == nil {
359 return stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
360 return acquireRealDeflateWriter(w, level)
361 })
362 }
363 sw := v.(stackless.Writer)
364 sw.Reset(w)
365 return sw
366}
367
368func releaseStacklessDeflateWriter(sw stackless.Writer, level int) {
369 sw.Close()
370 nLevel := normalizeCompressLevel(level)
371 p := stacklessDeflateWriterPoolMap[nLevel]
372 p.Put(sw)
373}
374
375func acquireRealDeflateWriter(w io.Writer, level int) *zlib.Writer {
376 nLevel := normalizeCompressLevel(level)
377 p := realDeflateWriterPoolMap[nLevel]
378 v := p.Get()
379 if v == nil {
380 zw, err := zlib.NewWriterLevel(w, level)
381 if err != nil {
382 panic(fmt.Sprintf("BUG: unexpected error from zlib.NewWriterLevel(%d): %s", level, err))
383 }
384 return zw
385 }
386 zw := v.(*zlib.Writer)
387 zw.Reset(w)
388 return zw
389}
390
391func releaseRealDeflateWriter(zw *zlib.Writer, level int) {
392 zw.Close()
393 nLevel := normalizeCompressLevel(level)
394 p := realDeflateWriterPoolMap[nLevel]
395 p.Put(zw)
396}
397
398var (
399 stacklessDeflateWriterPoolMap = newCompressWriterPoolMap()
400 realDeflateWriterPoolMap = newCompressWriterPoolMap()
401)
402
// newCompressWriterPoolMap initializes pools for all the compression
// levels defined in https://golang.org/pkg/compress/flate/#pkg-constants .
// Compression levels are normalized with normalizeCompressLevel,
// so they fit [0..11].
func newCompressWriterPoolMap() []*sync.Pool {
	// The slice length is known up front, so allocate it in one shot
	// instead of growing it with repeated appends.
	m := make([]*sync.Pool, 12)
	for i := range m {
		m[i] = &sync.Pool{}
	}
	return m
}
414
415func isFileCompressible(f *os.File, minCompressRatio float64) bool {
416 // Try compressing the first 4kb of of the file
417 // and see if it can be compressed by more than
418 // the given minCompressRatio.
419 b := bytebufferpool.Get()
420 zw := acquireStacklessGzipWriter(b, CompressDefaultCompression)
421 lr := &io.LimitedReader{
422 R: f,
423 N: 4096,
424 }
425 _, err := copyZeroAlloc(zw, lr)
426 releaseStacklessGzipWriter(zw, CompressDefaultCompression)
427 f.Seek(0, 0) //nolint:errcheck
428 if err != nil {
429 return false
430 }
431
432 n := 4096 - lr.N
433 zn := len(b.B)
434 bytebufferpool.Put(b)
435 return float64(zn) < float64(n)*minCompressRatio
436}
437
438// normalizes compression level into [0..11], so it could be used as an index
439// in *PoolMap.
440func normalizeCompressLevel(level int) int {
441 // -2 is the lowest compression level - CompressHuffmanOnly
442 // 9 is the highest compression level - CompressBestCompression
443 if level < -2 || level > 9 {
444 level = CompressDefaultCompression
445 }
446 return level + 2
447}
Note: See TracBrowser for help on using the repository browser.