vendor: update github.com/klauspost/compress from v1.10.3 to v1.10.4

Aliaksandr Valialkin 2020-04-10 18:39:15 +03:00
parent a30c98c0bc
commit 66d8086a5e
17 changed files with 147 additions and 107 deletions

go.mod

@@ -9,7 +9,7 @@ require (
 	github.com/cespare/xxhash/v2 v2.1.1
 	github.com/golang/snappy v0.0.1
 	github.com/jmespath/go-jmespath v0.3.0 // indirect
-	github.com/klauspost/compress v1.10.3
+	github.com/klauspost/compress v1.10.4
 	github.com/valyala/fasthttp v1.9.0
 	github.com/valyala/fastjson v1.5.0
 	github.com/valyala/fastrand v1.0.0

go.sum

@@ -116,8 +116,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8=
-github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.10.4 h1:jFzIFaf586tquEB5EhzQG0HwGNSlgAJpG53G6Ss11wc=
+github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=

vendor/github.com/klauspost/compress/flate/deflate.go

@@ -80,9 +80,7 @@ type advancedState struct {
 	// deflate state
 	length         int
 	offset         int
-	hash           uint32
 	maxInsertIndex int
-	ii             uint16 // position of last match, intended to overflow to reset.

 	// Input hash chains
 	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
@@ -97,6 +95,9 @@ type advancedState struct {
 	// input window: unprocessed data is window[index:windowEnd]
 	index     int
 	hashMatch [maxMatchLength + minMatchLength]uint32
+
+	hash uint32
+	ii   uint16 // position of last match, intended to overflow to reset.
 }

 type compressor struct {
@@ -107,18 +108,19 @@ type compressor struct {
 	// compression algorithm
 	fill func(*compressor, []byte) int // copy data to window
 	step func(*compressor)             // process window
-	sync bool                          // requesting flush

-	window        []byte
-	windowEnd     int
-	blockStart    int  // window index where current tokens start
-	byteAvailable bool // if true, still need to process window[index-1].
-	err           error
+	window     []byte
+	windowEnd  int
+	blockStart int // window index where current tokens start
+	err        error

 	// queued output tokens
 	tokens tokens
 	fast   fastEnc
 	state  *advancedState
+
+	sync          bool // requesting flush
+	byteAvailable bool // if true, still need to process window[index-1].
 }

 func (d *compressor) fillDeflate(b []byte) int {

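Note: most hunks in this update reorder struct fields so that pointer- and word-sized fields are grouped first and small bool/uint8 fields cluster at the end. Upstream does not state the motivation in the diff itself; grouping same-size fields this way reduces alignment padding, which is the plausible intent. A minimal sketch of the effect, with hypothetical types that are not from the library:

package main

import (
	"fmt"
	"unsafe"
)

// Hypothetical types for illustration only; not taken from the library.
type interleaved struct {
	a bool  // 1 byte + 7 bytes padding before b
	b int64 // 8 bytes
	c bool  // 1 byte + 7 bytes padding before d
	d int64 // 8 bytes
	e bool  // 1 byte + 7 bytes trailing padding
}

type grouped struct {
	b int64 // 8 bytes
	d int64 // 8 bytes
	a bool  // the three bools share a single 8-byte word
	c bool
	e bool
}

func main() {
	fmt.Println(unsafe.Sizeof(interleaved{})) // 40 on 64-bit platforms
	fmt.Println(unsafe.Sizeof(grouped{}))     // 24 on 64-bit platforms
}
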
vendor/github.com/klauspost/compress/flate/inflate.go

@@ -295,10 +295,6 @@ type decompressor struct {
 	r       Reader
 	roffset int64

-	// Input bits, in top of b.
-	b  uint32
-	nb uint
-
 	// Huffman decoders for literal/length, distance.
 	h1, h2 huffmanDecoder
@@ -309,19 +305,24 @@ type decompressor struct {
 	// Output history, buffer.
 	dict dictDecoder

-	// Temporary buffer (avoids repeated allocation).
-	buf [4]byte
-
 	// Next step in the decompression,
 	// and decompression state.
 	step      func(*decompressor)
 	stepState int
-	final     bool
 	err       error
 	toRead    []byte
 	hl, hd    *huffmanDecoder
 	copyLen   int
 	copyDist  int
+
+	// Temporary buffer (avoids repeated allocation).
+	buf [4]byte
+
+	// Input bits, in top of b.
+	b  uint32
+	nb uint
+
+	final bool
 }

 func (f *decompressor) nextBlock() {

vendor/github.com/klauspost/compress/fse/fse.go

@@ -44,18 +44,14 @@ var (
 // Scratch provides temporary storage for compression and decompression.
 type Scratch struct {
 	// Private
-	count          [maxSymbolValue + 1]uint32
-	norm           [maxSymbolValue + 1]int16
-	symbolLen      uint16 // Length of active part of the symbol table.
-	actualTableLog uint8  // Selected tablelog.
-	br             byteReader
-	bits           bitReader
-	bw             bitWriter
-	ct             cTable      // Compression tables.
-	decTable       []decSymbol // Decompression table.
-	zeroBits       bool        // no bits has prob > 50%.
-	clearCount     bool        // clear count
-	maxCount       int         // count of the most probable symbol
+	count    [maxSymbolValue + 1]uint32
+	norm     [maxSymbolValue + 1]int16
+	br       byteReader
+	bits     bitReader
+	bw       bitWriter
+	ct       cTable      // Compression tables.
+	decTable []decSymbol // Decompression table.
+	maxCount int         // count of the most probable symbol

 	// Per block parameters.
 	// These can be used to override compression parameters of the block.
@@ -68,17 +64,22 @@ type Scratch struct {
 	// and allocation will be avoided.
 	Out []byte

-	// MaxSymbolValue will override the maximum symbol value of the next block.
-	MaxSymbolValue uint8
-
-	// TableLog will attempt to override the tablelog for the next block.
-	TableLog uint8
-
 	// DecompressLimit limits the maximum decoded size acceptable.
 	// If > 0 decompression will stop when approximately this many bytes
 	// has been decoded.
 	// If 0, maximum size will be 2GB.
 	DecompressLimit int
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	actualTableLog uint8  // Selected tablelog.
+	zeroBits       bool   // no bits has prob > 50%.
+	clearCount     bool   // clear count
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	TableLog uint8
 }

 // Histogram allows to populate the histogram and skip that step in the compression,
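
Note: the fse.Scratch reorder keeps DecompressLimit public, documented above as capping decoded output (0 means a ~2GB default). A minimal usage sketch; the input bytes and the 1 MiB cap are our own choices, not from the library:

package main

import (
	"fmt"

	"github.com/klauspost/compress/fse"
)

func main() {
	var c fse.Scratch
	in := []byte("aaaaaabbbbccdd aaaaaabbbbccdd aaaaaabbbbccdd")
	comp, err := fse.Compress(in, &c)
	if err != nil {
		// fse.ErrIncompressible or fse.ErrUseRLE mean the caller should
		// store the block differently; they are not hard failures.
		fmt.Println("compress:", err)
		return
	}

	var d fse.Scratch
	d.DecompressLimit = 1 << 20 // cap decoded output at 1 MiB
	out, err := fse.Decompress(comp, &d)
	fmt.Println(len(out), err)
}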

vendor/github.com/klauspost/compress/gzip/gzip.go

@@ -37,13 +37,13 @@ type Writer struct {
 	Header      // written at first call to Write, Flush, or Close
 	w           io.Writer
 	level       int
-	wroteHeader bool
+	err         error
 	compressor  *flate.Writer
 	digest      uint32 // CRC-32, IEEE polynomial (section 8)
 	size        uint32 // Uncompressed size (section 2.3.1)
+	wroteHeader bool
 	closed      bool
 	buf         [10]byte
-	err         error
 }

 // NewWriter returns a new Writer.
vendor/github.com/klauspost/compress/huff0/huff0.go

@@ -79,6 +79,13 @@ type Scratch struct {
 	// Slice of the returned data.
 	OutData []byte

+	// MaxDecodedSize will set the maximum allowed output size.
+	// This value will automatically be set to BlockSizeMax if not set.
+	// Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded.
+	MaxDecodedSize int
+
+	br byteReader
+
 	// MaxSymbolValue will override the maximum symbol value of the next block.
 	MaxSymbolValue uint8
@@ -95,12 +102,6 @@ type Scratch struct {
 	// If WantLogLess == 0 any improvement will do.
 	WantLogLess uint8

-	// MaxDecodedSize will set the maximum allowed output size.
-	// This value will automatically be set to BlockSizeMax if not set.
-	// Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded.
-	MaxDecodedSize int
-	br             byteReader
-
 	symbolLen  uint16 // Length of active part of the symbol table.
 	maxCount   int    // count of the most probable symbol
 	clearCount bool   // clear count

vendor/github.com/klauspost/compress/zstd/blockdec.go

@@ -75,21 +75,25 @@ type blockDec struct {
 	// Window size of the block.
 	WindowSize uint64
-	Type       blockType
-	RLESize    uint32
+
+	history     chan *history
+	input       chan struct{}
+	result      chan decodeOutput
+	sequenceBuf []seq
+	err         error
+	decWG       sync.WaitGroup
+
+	// Block is RLE, this is the size.
+	RLESize uint32
+	tmp     [4]byte
+
+	Type blockType

 	// Is this the last block of a frame?
 	Last bool
-
 	// Use less memory
-	lowMem      bool
-	history     chan *history
-	input       chan struct{}
-	result      chan decodeOutput
-	sequenceBuf []seq
-	tmp         [4]byte
-	err         error
-	decWG       sync.WaitGroup
+	lowMem bool
 }

 func (b *blockDec) String() string {

vendor/github.com/klauspost/compress/zstd/decoder.go

@@ -169,7 +169,12 @@ func (d *Decoder) Reset(r io.Reader) error {
 			println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
 		}
 		b := bb.Bytes()
-		dst, err := d.DecodeAll(b, nil)
+		var dst []byte
+		if cap(d.current.b) > 0 {
+			dst = d.current.b
+		}
+
+		dst, err := d.DecodeAll(b, dst[:0])
 		if err == nil {
 			err = io.EOF
 		}
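
Note: Reset now seeds DecodeAll with the decoder's existing d.current.b buffer instead of nil, saving an allocation per *bytes.Buffer decode. Callers of the public API can apply the same reuse pattern; a sketch with our own buffer names:

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// A nil reader is fine when the decoder is only used via DecodeAll.
	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	var scratch []byte // reused across calls to avoid per-call allocations
	for _, frame := range [][]byte{ /* compressed zstd frames */ } {
		out, err := dec.DecodeAll(frame, scratch[:0])
		if err != nil {
			panic(err)
		}
		fmt.Println(len(out))
		scratch = out // keep the grown buffer for the next iteration
	}
}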

vendor/github.com/klauspost/compress/zstd/enc_better.go

@@ -104,10 +104,7 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
 	sLimit := int32(len(src)) - inputMargin
 	// stepSize is the number of bytes to skip on every main loop iteration.
 	// It should be >= 1.
-	stepSize := int32(e.o.targetLength)
-	if stepSize == 0 {
-		stepSize++
-	}
+	const stepSize = 1

 	const kSearchStrength = 9
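
Note: stepSize no longer comes from e.o.targetLength (encParams is being retired, see enc_params.go below); it is now a compile-time constant. For context, a paraphrased sketch of how such encoders use stepSize and kSearchStrength to skip ahead when matches are scarce; the loop body is illustrative only, not the upstream code:

package main

import "fmt"

// positionsProbed counts how many positions a skip loop would examine.
func positionsProbed(src []byte) int {
	const stepSize = 1
	const kSearchStrength = 9
	nextEmit := 0 // index just past the last emitted match (always 0 here)
	n := 0
	for s := 0; s < len(src); {
		n++ // a real encoder would probe its hash tables at s here
		// The longer the stretch without a match, the bigger the hop.
		s += stepSize + (s-nextEmit)>>kSearchStrength
	}
	return n
}

func main() {
	fmt.Println(positionsProbed(make([]byte, 1<<20)))
}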

vendor/github.com/klauspost/compress/zstd/enc_dfast.go

@@ -80,10 +80,7 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
 	sLimit := int32(len(src)) - inputMargin
 	// stepSize is the number of bytes to skip on every main loop iteration.
 	// It should be >= 1.
-	stepSize := int32(e.o.targetLength)
-	if stepSize == 0 {
-		stepSize++
-	}
+	const stepSize = 1

 	const kSearchStrength = 8
@@ -401,10 +398,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
 	sLimit := int32(len(src)) - inputMargin
 	// stepSize is the number of bytes to skip on every main loop iteration.
 	// It should be >= 1.
-	stepSize := int32(e.o.targetLength)
-	if stepSize == 0 {
-		stepSize++
-	}
+	const stepSize = 1

 	const kSearchStrength = 8

vendor/github.com/klauspost/compress/zstd/enc_fast.go

@@ -25,7 +25,6 @@ type tableEntry struct {
 }

 type fastBase struct {
-	o encParams
 	// cur is the offset at the start of hist
 	cur int32
 	// maximum offset. Should be at least 2x block size.
@@ -117,11 +116,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
 	sLimit := int32(len(src)) - inputMargin
 	// stepSize is the number of bytes to skip on every main loop iteration.
 	// It should be >= 2.
-	stepSize := int32(e.o.targetLength)
-	if stepSize == 0 {
-		stepSize++
-	}
-	stepSize++
+	const stepSize = 2

 	// TEMPLATE
 	const hashLog = tableBits

vendor/github.com/klauspost/compress/zstd/enc_params.go

@@ -4,6 +4,8 @@
 package zstd

+/*
+// encParams are not really used, just here for reference.
 type encParams struct {
 	// largest match distance : larger == more compression, more memory needed during decompression
 	windowLog uint8
@@ -152,3 +154,4 @@ var defEncParams = [4][]encParams{
 		{14, 15, 15, 10, 3, 999, strategyBtultra2}, // level 22.
 	},
 }
+*/

vendor/github.com/klauspost/compress/zstd/encoder.go

@@ -39,17 +39,18 @@ type encoder interface {
 }

 type encoderState struct {
-	w             io.Writer
-	filling       []byte
-	current       []byte
-	previous      []byte
-	encoder       encoder
-	writing       *blockEnc
-	err           error
-	writeErr      error
-	nWritten      int64
-	headerWritten bool
-	eofWritten    bool
+	w                io.Writer
+	filling          []byte
+	current          []byte
+	previous         []byte
+	encoder          encoder
+	writing          *blockEnc
+	err              error
+	writeErr         error
+	nWritten         int64
+	headerWritten    bool
+	eofWritten       bool
+	fullFrameWritten bool

 	// This waitgroup indicates an encode is running.
 	wg sync.WaitGroup
@@ -114,6 +115,7 @@ func (e *Encoder) Reset(w io.Writer) {
 	s.encoder.Reset()
 	s.headerWritten = false
 	s.eofWritten = false
+	s.fullFrameWritten = false
 	s.w = w
 	s.err = nil
 	s.nWritten = 0
@@ -172,6 +174,22 @@ func (e *Encoder) nextBlock(final bool) error {
 		return fmt.Errorf("block > maxStoreBlockSize")
 	}
 	if !s.headerWritten {
+		// If we have a single block encode, do a sync compression.
+		if final && len(s.filling) > 0 {
+			s.current = e.EncodeAll(s.filling, s.current[:0])
+			var n2 int
+			n2, s.err = s.w.Write(s.current)
+			if s.err != nil {
+				return s.err
+			}
+			s.nWritten += int64(n2)
+			s.current = s.current[:0]
+			s.filling = s.filling[:0]
+			s.headerWritten = true
+			s.fullFrameWritten = true
+			return nil
+		}
+
 		var tmp [maxHeaderSize]byte
 		fh := frameHeader{
 			ContentSize: 0,
@@ -294,7 +312,9 @@ func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) {
 	src := e.state.filling
 	for {
 		n2, err := r.Read(src)
-		_, _ = e.state.encoder.CRC().Write(src[:n2])
+		if e.o.crc {
+			_, _ = e.state.encoder.CRC().Write(src[:n2])
+		}
 		// src is now the unfilled part...
 		src = src[n2:]
 		n += int64(n2)
@@ -359,6 +379,9 @@ func (e *Encoder) Close() error {
 	if err != nil {
 		return err
 	}
+	if e.state.fullFrameWritten {
+		return s.err
+	}
 	s.wg.Wait()
 	s.wWg.Wait()
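
Note: fullFrameWritten lets Close return early once the new single-block path above has emitted a whole frame via EncodeAll. A small write followed by Close should exercise that path; the payload and options here are our own, not from the commit:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	// A payload that fits in the first block, written and closed without
	// an intermediate Flush, should take the synchronous EncodeAll path.
	enc, err := zstd.NewWriter(&buf, zstd.WithEncoderCRC(false))
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("hello zstd")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len(), "compressed bytes")
}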

vendor/github.com/klauspost/compress/zstd/encoder_options.go

@@ -12,15 +12,16 @@ type EOption func(*encoderOptions) error

 // options retains accumulated state of multiple options.
 type encoderOptions struct {
-	concurrent int
-	crc        bool
-	single     *bool
-	pad        int
-	blockSize  int
-	windowSize int
-	level      EncoderLevel
-	fullZero   bool
-	noEntropy  bool
+	concurrent   int
+	level        EncoderLevel
+	single       *bool
+	pad          int
+	blockSize    int
+	windowSize   int
+	crc          bool
+	fullZero     bool
+	noEntropy    bool
+	customWindow bool
 }

 func (o *encoderOptions) setDefault() {
@@ -30,7 +31,7 @@ func (o *encoderOptions) setDefault() {
 		crc:        true,
 		single:     nil,
 		blockSize:  1 << 16,
-		windowSize: 1 << 22,
+		windowSize: 8 << 20,
 		level:      SpeedDefault,
 	}
 }
@@ -85,6 +86,7 @@ func WithWindowSize(n int) EOption {
 		}
 		o.windowSize = n
+		o.customWindow = true
 		if o.blockSize > o.windowSize {
 			o.blockSize = o.windowSize
 		}
@@ -195,6 +197,16 @@ func WithEncoderLevel(l EncoderLevel) EOption {
 			return fmt.Errorf("unknown encoder level")
 		}
 		o.level = l
+		if !o.customWindow {
+			switch o.level {
+			case SpeedFastest:
+				o.windowSize = 4 << 20
+			case SpeedDefault:
+				o.windowSize = 8 << 20
+			case SpeedBetterCompression:
+				o.windowSize = 16 << 20
+			}
+		}
 		return nil
 	}
 }
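
Note: the fixed 1<<22 (4 MiB) default window is replaced by per-level defaults (4, 8, or 16 MiB), and customWindow keeps an explicit WithWindowSize from being overridden by a later WithEncoderLevel. A sketch; the 1 MiB window is our choice:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	// The explicit 1 MiB window survives even though the level option
	// comes later: WithWindowSize sets customWindow, which the level
	// switch above checks before applying its own default.
	enc, err := zstd.NewWriter(&buf,
		zstd.WithWindowSize(1<<20), // must be a power of two
		zstd.WithEncoderLevel(zstd.SpeedBetterCompression),
	)
	if err != nil {
		panic(err)
	}
	defer enc.Close()
	fmt.Println("encoder ready")
}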

vendor/github.com/klauspost/compress/zstd/framedec.go

@@ -16,16 +16,11 @@ import (
 )

 type frameDec struct {
-	o         decoderOptions
-	crc       hash.Hash64
-	frameDone sync.WaitGroup
-	offset    int64
+	o      decoderOptions
+	crc    hash.Hash64
+	offset int64

-	WindowSize       uint64
-	DictionaryID     uint32
-	FrameContentSize uint64
-	HasCheckSum      bool
-	SingleSegment    bool
+	WindowSize uint64

 	// maxWindowSize is the maximum windows size to support.
 	// should never be bigger than max-int.
@@ -42,9 +37,16 @@ type frameDec struct {
 	// Byte buffer that can be reused for small input blocks.
 	bBuf byteBuf

+	FrameContentSize uint64
+	frameDone        sync.WaitGroup
+
+	DictionaryID  uint32
+	HasCheckSum   bool
+	SingleSegment bool
+
 	// asyncRunning indicates whether the async routine processes input on 'decoding'.
-	asyncRunning   bool
 	asyncRunningMu sync.Mutex
+	asyncRunning   bool
 }

 const (

vendor/modules.txt

@@ -85,7 +85,7 @@ github.com/jmespath/go-jmespath
 github.com/jstemmer/go-junit-report
 github.com/jstemmer/go-junit-report/formatter
 github.com/jstemmer/go-junit-report/parser
-# github.com/klauspost/compress v1.10.3
+# github.com/klauspost/compress v1.10.4
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/gzip