package brotli

import (
	"errors"
	"io"

	"github.com/andybalholm/brotli/matchfinder"
)

const (
	BestSpeed          = 0
	BestCompression    = 11
	DefaultCompression = 6
)

// WriterOptions configures Writer.
type WriterOptions struct {
	// Quality controls the compression-speed vs compression-density trade-offs.
	// The higher the quality, the slower the compression. Range is 0 to 11.
	Quality int

	// LGWin is the base 2 logarithm of the sliding window size.
	// Range is 10 to 24. 0 indicates automatic configuration based on Quality.
	LGWin int
}
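
// A minimal configuration sketch (not part of the original file; assumes an
// in-memory bytes.Buffer as the destination and a hypothetical data slice):
//
//	var buf bytes.Buffer
//	w := NewWriterOptions(&buf, WriterOptions{
//		Quality: 9,  // denser but slower than DefaultCompression
//		LGWin:   22, // 2^22-byte (4 MiB) sliding window
//	})
//	w.Write(data)
//	w.Close()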

var (
	errEncode       = errors.New("brotli: encode error")
	errWriterClosed = errors.New("brotli: Writer is closed")
)

// NewWriter initializes a new Writer that compresses at DefaultCompression.
// Writes to the returned writer are compressed and written to dst.
// It is the caller's responsibility to call Close on the Writer when done.
// Writes may be buffered and not flushed until Close.
func NewWriter(dst io.Writer) *Writer {
	return NewWriterLevel(dst, DefaultCompression)
}
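
// A round-trip sketch (not part of the original file; assumes this package's
// NewReader for decoding, with error handling elided):
//
//	var buf bytes.Buffer
//	w := NewWriter(&buf)
//	w.Write([]byte("hello, brotli"))
//	w.Close() // Close is required to complete the stream
//	plain, _ := io.ReadAll(NewReader(&buf))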

// NewWriterLevel is like NewWriter but specifies the compression level instead
// of assuming DefaultCompression.
// The compression level can be DefaultCompression or any integer value between
// BestSpeed and BestCompression inclusive.
func NewWriterLevel(dst io.Writer, level int) *Writer {
	return NewWriterOptions(dst, WriterOptions{
		Quality: level,
	})
}

// NewWriterOptions is like NewWriter but specifies WriterOptions.
func NewWriterOptions(dst io.Writer, options WriterOptions) *Writer {
	w := new(Writer)
	w.options = options
	w.Reset(dst)
	return w
}

// Reset discards the Writer's state and makes it equivalent to its original
// state from NewWriter or NewWriterLevel, but writing to dst instead.
// This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(dst io.Writer) {
	encoderInitState(w)
	w.params.quality = w.options.Quality
	if w.options.LGWin > 0 {
		w.params.lgwin = uint(w.options.LGWin)
	}
	w.dst = dst
	w.err = nil
}
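
// A reuse sketch (not part of the original file): Reset lets one Writer
// serve many streams without reallocating; dests and payload are
// hypothetical.
//
//	w := NewWriterLevel(nil, BestSpeed)
//	for _, dst := range dests {
//		w.Reset(dst)
//		w.Write(payload)
//		w.Close() // Close clears w.dst; Reset makes w usable again
//	}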

// writeChunk feeds p to the encoder with the stream operation op
// (process, flush, or finish) and loops until the encoder has consumed
// all of the input or an error occurs.
func (w *Writer) writeChunk(p []byte, op int) (n int, err error) {
	if w.dst == nil {
		return 0, errWriterClosed
	}
	if w.err != nil {
		return 0, w.err
	}

	for {
		availableIn := uint(len(p))
		nextIn := p
		success := encoderCompressStream(w, op, &availableIn, &nextIn)
		bytesConsumed := len(p) - int(availableIn)
		p = p[bytesConsumed:]
		n += bytesConsumed
		if !success {
			return n, errEncode
		}

		if len(p) == 0 || w.err != nil {
			return n, w.err
		}
	}
}

// Flush outputs encoded data for all input provided to Write. The resulting
// output can be decoded to match all input before Flush, but the stream is
// not yet complete until after Close.
// Flush has a negative impact on compression.
func (w *Writer) Flush() error {
	_, err := w.writeChunk(nil, operationFlush)
	return err
}
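
// A streaming sketch (not part of the original file): Flush lets the peer
// decode everything written so far, e.g. over a network connection; conn,
// header, and body are hypothetical.
//
//	w := NewWriter(conn)
//	w.Write(header)
//	w.Flush() // the peer can now decode the header
//	w.Write(body)
//	w.Close()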

// Close flushes remaining data to the underlying writer and completes the
// Brotli stream.
func (w *Writer) Close() error {
	// If the stream is already closed, writeChunk reports errWriterClosed.
	_, err := w.writeChunk(nil, operationFinish)
	w.dst = nil
	return err
}

// Write implements io.Writer. Flush or Close must be called to ensure that the
// encoded bytes are actually flushed to the underlying Writer.
func (w *Writer) Write(p []byte) (n int, err error) {
	return w.writeChunk(p, operationProcess)
}

// nopCloser wraps an io.Writer, adding a no-op Close method.
type nopCloser struct {
	io.Writer
}

func (nopCloser) Close() error { return nil }

// NewWriterV2 is like NewWriterLevel, but it uses the new implementation
// based on the matchfinder package. It currently supports up to level 7;
// if a higher level is specified, level 7 will be used.
func NewWriterV2(dst io.Writer, level int) *matchfinder.Writer {
	var mf matchfinder.MatchFinder
	if level < 2 {
		mf = matchfinder.M0{Lazy: level == 1}
	} else {
		// Higher levels search longer hash chains (and, from level 6 up, use
		// a shorter hash) to find more matches at the cost of speed.
		hashLen := 6
		if level >= 6 {
			hashLen = 5
		}
		chainLen := 64
		switch level {
		case 2:
			chainLen = 0
		case 3:
			chainLen = 1
		case 4:
			chainLen = 2
		case 5:
			chainLen = 4
		case 6:
			chainLen = 8
		}
		mf = &matchfinder.M4{
			MaxDistance:     1 << 20,
			ChainLength:     chainLen,
			HashLen:         hashLen,
			DistanceBitCost: 57,
		}
	}

	return &matchfinder.Writer{
		Dest:        dst,
		MatchFinder: mf,
		Encoder:     &Encoder{},
		BlockSize:   1 << 16,
	}
}
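
// A NewWriterV2 sketch (not part of the original file): the returned
// *matchfinder.Writer is used like the classic Writer; data is a
// hypothetical []byte.
//
//	var buf bytes.Buffer
//	w := NewWriterV2(&buf, 5)
//	w.Write(data)
//	w.Close()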