vendor: update github.com/klauspost/compress from v1.7.6 to v1.8.2

Aliaksandr Valialkin 2019-09-06 00:45:26 +03:00
parent 7343e8b408
commit 6c46afb087
17 changed files with 189 additions and 109 deletions

go.mod

@@ -6,7 +6,7 @@ require (
 	github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18
 	github.com/golang/snappy v0.0.1
 	github.com/google/go-cmp v0.3.0 // indirect
-	github.com/klauspost/compress v1.7.6
+	github.com/klauspost/compress v1.8.2
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
 	github.com/valyala/fastjson v1.4.1
 	github.com/valyala/gozstd v1.6.1

go.sum

@@ -20,8 +20,8 @@ github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.7.6 h1:GH2karLOcuZtA5a3+KuzSU33A2cvcHGbtEWM6K4t7oU=
-github.com/klauspost/compress v1.7.6/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.8.2 h1:Bx0qjetmNjdFXASH02NSAREKpiaDwkO1DRZ3dV2KCcs=
+github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=

vendor/github.com/klauspost/compress/huff0/decompress.go

@@ -193,14 +193,26 @@ func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) {
 		tmp[off+3] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask])
 		off += 4
 		if off == 0 {
+			if len(s.Out)+256 > s.MaxDecodedSize {
+				br.close()
+				return nil, ErrMaxDecodedSizeExceeded
+			}
 			s.Out = append(s.Out, tmp...)
 		}
 	}

+	if len(s.Out)+int(off) > s.MaxDecodedSize {
+		br.close()
+		return nil, ErrMaxDecodedSizeExceeded
+	}
 	s.Out = append(s.Out, tmp[:off]...)

 	for !br.finished() {
 		br.fill()
+		if len(s.Out) >= s.MaxDecodedSize {
+			br.close()
+			return nil, ErrMaxDecodedSizeExceeded
+		}
 		s.Out = append(s.Out, decode())
 	}
 	return s.Out, br.close()
@@ -218,6 +230,9 @@ func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
 	if len(in) < 6+(4*1) {
 		return nil, errors.New("input too small")
 	}
+	if dstSize > s.MaxDecodedSize {
+		return nil, ErrMaxDecodedSizeExceeded
+	}

 	// TODO: We do not detect when we overrun a buffer, except if the last one does.
 	var br [4]bitReader

vendor/github.com/klauspost/compress/huff0/huff0.go

@@ -35,6 +35,9 @@ var (
 	// ErrTooBig is returned if input is too large for a single block.
 	ErrTooBig = errors.New("input too big")
+
+	// ErrMaxDecodedSizeExceeded is returned if the decoded output is too large for a single block.
+	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
 )

 type ReusePolicy uint8
@@ -86,6 +89,11 @@ type Scratch struct {
 	// Reuse will specify the reuse policy
 	Reuse ReusePolicy

+	// MaxDecodedSize will set the maximum allowed output size.
+	// This value will automatically be set to BlockSizeMax if not set.
+	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+	MaxDecodedSize int
+
 	br        byteReader
 	symbolLen uint16 // Length of active part of the symbol table.
 	maxCount  int    // count of the most probable symbol
@@ -116,6 +124,9 @@ func (s *Scratch) prepare(in []byte) (*Scratch, error) {
 	if s.TableLog > tableLogMax {
 		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, tableLogMax)
 	}
+	if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
+		s.MaxDecodedSize = BlockSizeMax
+	}
 	if s.clearCount && s.maxCount == 0 {
 		for i := range s.count {
 			s.count[i] = 0
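Note: the new Scratch.MaxDecodedSize field gives callers a hard cap on decoder output, enforced at each append in Decompress1X/Decompress4X above. A minimal sketch of using it (the input variable is hypothetical; ReadTable and Decompress1X are the package's existing entry points):

package main

import (
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	// tableAndData is assumed to hold a huff0 table description followed
	// by one compressed stream (hypothetical input, e.g. from Compress1X).
	var tableAndData []byte

	s, remain, err := huff0.ReadTable(tableAndData, nil)
	if err != nil {
		panic(err)
	}
	// Cap decoded output at 64 KiB. prepare() raises an unset (<= 0)
	// value to BlockSizeMax, so only explicit caps take effect.
	s.MaxDecodedSize = 64 << 10

	out, err := s.Decompress1X(remain)
	if err == huff0.ErrMaxDecodedSizeExceeded {
		fmt.Println("refused: output would exceed 64 KiB")
		return
	}
	fmt.Println("decoded", len(out), "bytes")
}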

vendor/github.com/klauspost/compress/snappy/decode_amd64.s

@@ -184,9 +184,7 @@ tagLit60Plus:
	// checks. In the asm version, we code it once instead of once per switch case.
	ADDQ CX, SI
	SUBQ $58, SI
-	MOVQ SI, BX
-	SUBQ R11, BX
-	CMPQ BX, R12
+	CMPQ SI, R13
	JA errCorrupt

	// case x == 60:
@@ -232,9 +230,7 @@ tagCopy4:
	ADDQ $5, SI

	// if uint(s) > uint(len(src)) { etc }
-	MOVQ SI, BX
-	SUBQ R11, BX
-	CMPQ BX, R12
+	CMPQ SI, R13
	JA errCorrupt

	// length = 1 + int(src[s-5])>>2
@@ -251,9 +247,7 @@ tagCopy2:
	ADDQ $3, SI

	// if uint(s) > uint(len(src)) { etc }
-	MOVQ SI, BX
-	SUBQ R11, BX
-	CMPQ BX, R12
+	CMPQ SI, R13
	JA errCorrupt

	// length = 1 + int(src[s-3])>>2
@@ -277,9 +271,7 @@ tagCopy:
	ADDQ $2, SI

	// if uint(s) > uint(len(src)) { etc }
-	MOVQ SI, BX
-	SUBQ R11, BX
-	CMPQ BX, R12
+	CMPQ SI, R13
	JA errCorrupt

	// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))

vendor/github.com/klauspost/compress/snappy/decode_other.go

@@ -85,14 +85,28 @@ func decode(dst, src []byte) int {
 		if offset <= 0 || d < offset || length > len(dst)-d {
 			return decodeErrCodeCorrupt
 		}
-		// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
-		// the built-in copy function, this byte-by-byte copy always runs
+		// Copy from an earlier sub-slice of dst to a later sub-slice.
+		// If no overlap, use the built-in copy:
+		if offset > length {
+			copy(dst[d:d+length], dst[d-offset:])
+			d += length
+			continue
+		}
+
+		// Unlike the built-in copy function, this byte-by-byte copy always runs
 		// forwards, even if the slices overlap. Conceptually, this is:
 		//
 		//	d += forwardCopy(dst[d:d+length], dst[d-offset:])
-		for end := d + length; d != end; d++ {
-			dst[d] = dst[d-offset]
+		//
+		// We align the slices into a and b and show the compiler they are the same size.
+		// This allows the loop to run without bounds checks.
+		a := dst[d : d+length]
+		b := dst[d-offset:]
+		b = b[:len(a)]
+		for i := range a {
+			a[i] = b[i]
 		}
+		d += length
 	}
 	if d != len(dst) {
 		return decodeErrCodeCorrupt
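Note: the replacement loop is a bounds-check-elimination idiom: reslicing the source to exactly len(a) lets the compiler prove both indexes stay in range, so the byte-by-byte overlap copy runs without per-iteration checks, while the non-overlapping case falls through to the built-in copy. The same pattern in isolation (forwardCopy is a hypothetical helper named after the comment in the diff):

package main

import "fmt"

// forwardCopy copies length bytes within dst, always moving forwards,
// so an overlapping source replicates already-written bytes (LZ-style).
func forwardCopy(dst []byte, d, offset, length int) {
	a := dst[d : d+length]
	b := dst[d-offset:]
	// Reslice b to the same length as a; after this the compiler can
	// drop the bounds checks inside the loop.
	b = b[:len(a)]
	for i := range a {
		a[i] = b[i]
	}
}

func main() {
	dst := make([]byte, 8)
	copy(dst, "ab")
	forwardCopy(dst, 2, 2, 6) // repeat "ab" across the rest of the buffer
	fmt.Println(string(dst))  // abababab
}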

vendor/github.com/klauspost/compress/zstd/blockdec.go

@@ -448,6 +448,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 		}
 		// Use our out buffer.
 		huff.Out = b.literalBuf[:0]
+		huff.MaxDecodedSize = litRegenSize
 		if fourStreams {
 			literals, err = huff.Decompress4X(literals, litRegenSize)
 		} else {
@@ -610,6 +611,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 		// Use our out buffer.
 		huff = hist.huffTree
 		huff.Out = b.literalBuf[:0]
+		huff.MaxDecodedSize = litRegenSize
 		if fourStreams {
 			literals, err = huff.Decompress4X(literals, litRegenSize)
 		} else {

vendor/github.com/klauspost/compress/zstd/decoder.go

@@ -75,6 +75,7 @@ var (
 // The Reset function can be used to initiate a new stream, which will considerably
 // reduce the allocations normally caused by NewReader.
 func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
+	initPredefined()
 	var d Decoder
 	d.o.setDefault()
 	for _, o := range opts {
@@ -285,10 +286,6 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 		d.frames <- frame
 	}()
 	frame.bBuf = input
-	if cap(dst) == 0 {
-		// Allocate 1MB by default if nothing is provided.
-		dst = make([]byte, 0, 1<<20)
-	}

 	for {
 		err := frame.reset(&frame.bBuf)
@@ -309,6 +306,16 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 				dst = dst2
 			}
 		}
+		if cap(dst) == 0 {
+			// Allocate window size * 2 by default if nothing is provided and we didn't get frame content size.
+			size := frame.WindowSize * 2
+			// Cap to 1 MB.
+			if size > 1<<20 {
+				size = 1 << 20
+			}
+			dst = make([]byte, 0, frame.WindowSize)
+		}
 		dst, err = frame.runDecoder(dst, block)
 		if err != nil {
 			return dst, err

vendor/github.com/klauspost/compress/zstd/decoder_options.go

@@ -50,15 +50,17 @@ func WithDecoderConcurrency(n int) DOption {
 }

 // WithDecoderMaxMemory allows to set a maximum decoded size for in-memory
-// (non-streaming) operations.
-// Maxmimum and default is 1 << 63 bytes.
+// non-streaming operations or maximum window size for streaming operations.
+// This can be used to control memory usage of potentially hostile content.
+// For streaming operations, the maximum window size is capped at 1<<30 bytes.
+// Maximum and default is 1 << 63 bytes.
 func WithDecoderMaxMemory(n uint64) DOption {
 	return func(o *decoderOptions) error {
 		if n == 0 {
-			return errors.New("WithDecoderMaxmemory must be at least 1")
+			return errors.New("WithDecoderMaxMemory must be at least 1")
 		}
 		if n > 1<<63 {
-			return fmt.Errorf("WithDecoderMaxmemorymust be less than 1 << 63")
+			return fmt.Errorf("WithDecoderMaxmemory must be less than 1 << 63")
 		}
 		o.maxDecodedSize = n
 		return nil
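Note: since WithDecoderMaxMemory now also clamps the streaming window (see the newFrameDec change below), a single option bounds memory for both DecodeAll and streaming reads. A small sketch of a capped streaming decompressor (the 64 MiB limit is an arbitrary example):

package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Stream stdin to stdout, refusing frames that would need more
	// than 64 MiB of decoder memory.
	dec, err := zstd.NewReader(os.Stdin, zstd.WithDecoderMaxMemory(64<<20))
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	if _, err := io.Copy(os.Stdout, dec); err != nil {
		panic(err)
	}
}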

vendor/github.com/klauspost/compress/zstd/encoder.go

@@ -59,6 +59,7 @@ type encoderState struct {
 // NewWriter will create a new Zstandard encoder.
 // If the encoder will be used for encoding blocks a nil writer can be used.
 func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) {
+	initPredefined()
 	var e Encoder
 	e.o.setDefault()
 	for _, o := range opts {
@@ -393,12 +394,31 @@ func (e *Encoder) Close() error {
 // EncodeAll will encode all input in src and append it to dst.
 // This function can be called concurrently, but each call will only run on a single goroutine.
-// If empty input is given, nothing is returned.
+// If empty input is given, nothing is returned, unless WithZeroFrames is specified.
 // Encoded blocks can be concatenated and the result will be the combined input stream.
 // Data compressed with EncodeAll can be decoded with the Decoder,
 // using either a stream or DecodeAll.
 func (e *Encoder) EncodeAll(src, dst []byte) []byte {
 	if len(src) == 0 {
+		if e.o.fullZero {
+			// Add frame header.
+			fh := frameHeader{
+				ContentSize:   0,
+				WindowSize:    minWindowSize,
+				SingleSegment: true,
+				// Adding a checksum would be a waste of space.
+				Checksum: false,
+				DictID:   0,
+			}
+			dst, _ = fh.appendTo(dst)
+
+			// Write raw block as last one only.
+			var blk blockHeader
+			blk.setSize(0)
+			blk.setType(blockTypeRaw)
+			blk.setLast(true)
+			dst = blk.appendTo(dst)
+		}
 		return dst
 	}
 	e.init.Do(func() {

vendor/github.com/klauspost/compress/zstd/encoder_options.go

@@ -6,7 +6,7 @@ import (
 	"strings"
 )

-// DOption is an option for creating a encoder.
+// EOption is an option for creating an encoder.
 type EOption func(*encoderOptions) error

 // options retains accumulated state of multiple options.
@@ -18,6 +18,7 @@ type encoderOptions struct {
 	blockSize  int
 	windowSize int
 	level      EncoderLevel
+	fullZero   bool
 }

 func (o *encoderOptions) setDefault() {
@@ -166,6 +167,16 @@ func WithEncoderLevel(l EncoderLevel) EOption {
 	}
 }

+// WithZeroFrames will encode 0 length input as full frames.
+// This can be needed for compatibility with zstandard usage,
+// but is not needed for this package.
+func WithZeroFrames(b bool) EOption {
+	return func(o *encoderOptions) error {
+		o.fullZero = b
+		return nil
+	}
+}
+
 // WithSingleSegment will set the "single segment" flag when EncodeAll is used.
 // If this flag is set, data must be regenerated within a single continuous memory segment.
 // In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present.
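Note: a hedged sketch of the new option in use. With WithZeroFrames enabled, EncodeAll on empty input emits a complete zero-length frame that other zstd implementations can decode, instead of returning dst unchanged:

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// A nil writer is fine when the encoder is only used via EncodeAll.
	enc, err := zstd.NewWriter(nil, zstd.WithZeroFrames(true))
	if err != nil {
		panic(err)
	}

	frame := enc.EncodeAll(nil, nil)
	// Without WithZeroFrames this would be empty; with it, frame holds a
	// complete zstd frame containing zero bytes of content.
	fmt.Println(len(frame) > 0) // true
}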

vendor/github.com/klauspost/compress/zstd/framedec.go

@@ -62,6 +62,9 @@ func newFrameDec(o decoderOptions) *frameDec {
 	d := frameDec{
 		o:             o,
 		maxWindowSize: 1 << 30,
 	}
+	if d.maxWindowSize > o.maxDecodedSize {
+		d.maxWindowSize = o.maxDecodedSize
+	}
 	return &d
 }

vendor/github.com/klauspost/compress/zstd/frameenc.go

@@ -5,7 +5,6 @@
 package zstd

 import (
-	"errors"
 	"fmt"
 	"io"
 	"math"
@@ -49,9 +48,7 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
 		windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
 		dst = append(dst, uint8(windowLog))
 	}
-	if f.SingleSegment && f.ContentSize == 0 {
-		return nil, errors.New("single segment, but no size set")
-	}
+
 	switch fcs {
 	case 0:
 		if f.SingleSegment {

vendor/github.com/klauspost/compress/zstd/fse_predefined.go

@@ -7,6 +7,7 @@ package zstd
 import (
 	"fmt"
 	"math"
+	"sync"
 )

 var (
@@ -69,85 +70,89 @@ func fillBase(dst []baseOffset, base uint32, bits ...uint8) {
 	}
 }

-func init() {
-	// Literals length codes
-	tmp := make([]baseOffset, 36)
-	for i := range tmp[:16] {
-		tmp[i] = baseOffset{
-			baseLine: uint32(i),
-			addBits:  0,
-		}
-	}
-	fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
-	symbolTableX[tableLiteralLengths] = tmp
-
-	// Match length codes
-	tmp = make([]baseOffset, 53)
-	for i := range tmp[:32] {
-		tmp[i] = baseOffset{
-			// The transformation adds the 3 length.
-			baseLine: uint32(i) + 3,
-			addBits:  0,
-		}
-	}
-	fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
-	symbolTableX[tableMatchLengths] = tmp
-
-	// Offset codes
-	tmp = make([]baseOffset, maxOffsetBits+1)
-	tmp[1] = baseOffset{
-		baseLine: 1,
-		addBits:  1,
-	}
-	fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)
-	symbolTableX[tableOffsets] = tmp
-
-	// Fill predefined tables and transform them.
-	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
-	for i := range fsePredef[:] {
-		f := &fsePredef[i]
-		switch tableIndex(i) {
-		case tableLiteralLengths:
-			// https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243
-			f.actualTableLog = 6
-			copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
-				2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
-				-1, -1, -1, -1})
-			f.symbolLen = 36
-		case tableOffsets:
-			// https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281
-			f.actualTableLog = 5
-			copy(f.norm[:], []int16{
-				1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
-				1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1})
-			f.symbolLen = 29
-		case tableMatchLengths:
-			//https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304
-			f.actualTableLog = 6
-			copy(f.norm[:], []int16{
-				1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
-				1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-				1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
-				-1, -1, -1, -1, -1})
-			f.symbolLen = 53
-		}
-		if err := f.buildDtable(); err != nil {
-			panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
-		}
-		if err := f.transform(symbolTableX[i]); err != nil {
-			panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
-		}
-		f.preDefined = true
-
-		// Create encoder as well
-		enc := &fsePredefEnc[i]
-		copy(enc.norm[:], f.norm[:])
-		enc.symbolLen = f.symbolLen
-		enc.actualTableLog = f.actualTableLog
-		if err := enc.buildCTable(); err != nil {
-			panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err))
-		}
-		enc.setBits(bitTables[i])
-		enc.preDefined = true
-	}
-}
+var predef sync.Once
+
+func initPredefined() {
+	predef.Do(func() {
+		// Literals length codes
+		tmp := make([]baseOffset, 36)
+		for i := range tmp[:16] {
+			tmp[i] = baseOffset{
+				baseLine: uint32(i),
+				addBits:  0,
+			}
+		}
+		fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
+		symbolTableX[tableLiteralLengths] = tmp
+
+		// Match length codes
+		tmp = make([]baseOffset, 53)
+		for i := range tmp[:32] {
+			tmp[i] = baseOffset{
+				// The transformation adds the 3 length.
+				baseLine: uint32(i) + 3,
+				addBits:  0,
+			}
+		}
+		fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
+		symbolTableX[tableMatchLengths] = tmp
+
+		// Offset codes
+		tmp = make([]baseOffset, maxOffsetBits+1)
+		tmp[1] = baseOffset{
+			baseLine: 1,
+			addBits:  1,
+		}
+		fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)
+		symbolTableX[tableOffsets] = tmp
+
+		// Fill predefined tables and transform them.
+		// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
+		for i := range fsePredef[:] {
+			f := &fsePredef[i]
+			switch tableIndex(i) {
+			case tableLiteralLengths:
+				// https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243
+				f.actualTableLog = 6
+				copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+					2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
+					-1, -1, -1, -1})
+				f.symbolLen = 36
+			case tableOffsets:
+				// https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281
+				f.actualTableLog = 5
+				copy(f.norm[:], []int16{
+					1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+					1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1})
+				f.symbolLen = 29
+			case tableMatchLengths:
+				//https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304
+				f.actualTableLog = 6
+				copy(f.norm[:], []int16{
+					1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+					1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+					1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
+					-1, -1, -1, -1, -1})
+				f.symbolLen = 53
+			}
+			if err := f.buildDtable(); err != nil {
+				panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+			}
+			if err := f.transform(symbolTableX[i]); err != nil {
+				panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
+			}
+			f.preDefined = true
+
+			// Create encoder as well
+			enc := &fsePredefEnc[i]
+			copy(enc.norm[:], f.norm[:])
+			enc.symbolLen = f.symbolLen
+			enc.actualTableLog = f.actualTableLog
+			if err := enc.buildCTable(); err != nil {
+				panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err))
+			}
+			enc.setBits(bitTables[i])
+			enc.preDefined = true
+		}
+	})
+}
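Note: replacing init() with initPredefined() guarded by sync.Once means programs that import the package but never use zstd skip the table construction entirely, while concurrent first callers still get exactly-once setup. The shape of the pattern, reduced to a standalone sketch (all names hypothetical):

package main

import (
	"fmt"
	"sync"
)

var tablesOnce sync.Once
var tables []int

// initTables builds shared lookup state on first use. Every public
// entry point calls it, mirroring NewReader/NewWriter/Convert above.
func initTables() {
	tablesOnce.Do(func() {
		tables = make([]int, 256)
		for i := range tables {
			tables[i] = i * i
		}
	})
}

func main() {
	initTables()
	initTables() // second call is a cheap no-op
	fmt.Println(tables[16])
}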

vendor/github.com/klauspost/compress/zstd/hash.go

@@ -64,7 +64,7 @@ func hash6(u uint64, h uint8) uint32 {
 	return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
 }

-// hash6 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
+// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
 // Preferably h should be a constant and should always be <64.
 func hash7(u uint64, h uint8) uint32 {
 	return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))

vendor/github.com/klauspost/compress/zstd/snappy.go

@@ -80,6 +80,7 @@ type SnappyConverter struct {
 // If any error is detected on the Snappy stream it is returned.
 // The number of bytes written is returned.
 func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
+	initPredefined()
 	r.err = nil
 	r.r = in
 	if r.block == nil {

vendor/modules.txt

@@ -6,7 +6,7 @@ github.com/VictoriaMetrics/metrics
 github.com/cespare/xxhash/v2
 # github.com/golang/snappy v0.0.1
 github.com/golang/snappy
-# github.com/klauspost/compress v1.7.6
+# github.com/klauspost/compress v1.8.2
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
 github.com/klauspost/compress/snappy