vendor: update github.com/klauspost/compress from v1.12.3 to v1.13.0

This commit is contained in:
Aliaksandr Valialkin 2021-06-08 15:42:57 +03:00
parent 96b691a0ab
commit 645e18dd88
18 changed files with 176 additions and 145 deletions

2
go.mod
View file

@@ -19,7 +19,7 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/snappy v0.0.3 github.com/golang/snappy v0.0.3
github.com/influxdata/influxdb v1.9.0 github.com/influxdata/influxdb v1.9.0
github.com/klauspost/compress v1.12.3 github.com/klauspost/compress v1.13.0
github.com/mattn/go-isatty v0.0.13 // indirect github.com/mattn/go-isatty v0.0.13 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/prometheus/client_golang v1.10.0 // indirect github.com/prometheus/client_golang v1.10.0 // indirect

4
go.sum
View file

@@ -583,8 +583,8 @@ github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU= github.com/klauspost/compress v1.13.0 h1:2T7tUoQrQT+fQWdaY5rjWztFGAFwbGD04iPJg90ZiOs=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.0/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=

View file

@@ -75,6 +75,7 @@ type Header struct {
type Reader struct { type Reader struct {
Header // valid after NewReader or Reader.Reset Header // valid after NewReader or Reader.Reset
r flate.Reader r flate.Reader
br *bufio.Reader
decompressor io.ReadCloser decompressor io.ReadCloser
digest uint32 // CRC-32, IEEE polynomial (section 8) digest uint32 // CRC-32, IEEE polynomial (section 8)
size uint32 // Uncompressed size (section 2.3.1) size uint32 // Uncompressed size (section 2.3.1)
@@ -109,7 +110,13 @@ func (z *Reader) Reset(r io.Reader) error {
if rr, ok := r.(flate.Reader); ok { if rr, ok := r.(flate.Reader); ok {
z.r = rr z.r = rr
} else { } else {
z.r = bufio.NewReader(r) // Reuse if we can.
if z.br != nil {
z.br.Reset(r)
} else {
z.br = bufio.NewReader(r)
}
z.r = z.br
} }
z.Header, z.err = z.readHeader() z.Header, z.err = z.readHeader()
return z.err return z.err

View file

@@ -144,7 +144,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
} }
cSize = 1 cSize = 1
case blockTypeCompressed: case blockTypeCompressed:
if debug { if debugDecoder {
println("Data size on stream:", cSize) println("Data size on stream:", cSize)
} }
b.RLESize = 0 b.RLESize = 0
@@ -153,7 +153,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
maxSize = int(windowSize) maxSize = int(windowSize)
} }
if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
if debug { if debugDecoder {
printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
} }
return ErrCompressedSizeTooBig return ErrCompressedSizeTooBig
@@ -179,7 +179,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
} }
b.data, err = br.readBig(cSize, b.dataStorage) b.data, err = br.readBig(cSize, b.dataStorage)
if err != nil { if err != nil {
if debug { if debugDecoder {
println("Reading block:", err, "(", cSize, ")", len(b.data)) println("Reading block:", err, "(", cSize, ")", len(b.data))
printf("%T", br) printf("%T", br)
} }
@@ -249,7 +249,7 @@ func (b *blockDec) startDecoder() {
b: b.dst, b: b.dst,
err: err, err: err,
} }
if debug { if debugDecoder {
println("Decompressed to", len(b.dst), "bytes, error:", err) println("Decompressed to", len(b.dst), "bytes, error:", err)
} }
b.result <- o b.result <- o
@@ -264,7 +264,7 @@ func (b *blockDec) startDecoder() {
default: default:
panic("Invalid block type") panic("Invalid block type")
} }
if debug { if debugDecoder {
println("blockDec: Finished block") println("blockDec: Finished block")
} }
} }
@@ -297,7 +297,7 @@ func (b *blockDec) decodeBuf(hist *history) error {
b.dst = hist.b b.dst = hist.b
hist.b = nil hist.b = nil
err := b.decodeCompressed(hist) err := b.decodeCompressed(hist)
if debug { if debugDecoder {
println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
} }
hist.b = b.dst hist.b = b.dst
@@ -390,7 +390,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
in = in[5:] in = in[5:]
} }
} }
if debug { if debugDecoder {
println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
} }
var literals []byte var literals []byte
@@ -428,7 +428,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
literals[i] = v literals[i] = v
} }
in = in[1:] in = in[1:]
if debug { if debugDecoder {
printf("Found %d RLE compressed literals\n", litRegenSize) printf("Found %d RLE compressed literals\n", litRegenSize)
} }
case literalsBlockTreeless: case literalsBlockTreeless:
@@ -439,7 +439,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
// Store compressed literals, so we defer decoding until we get history. // Store compressed literals, so we defer decoding until we get history.
literals = in[:litCompSize] literals = in[:litCompSize]
in = in[litCompSize:] in = in[litCompSize:]
if debug { if debugDecoder {
printf("Found %d compressed literals\n", litCompSize) printf("Found %d compressed literals\n", litCompSize)
} }
case literalsBlockCompressed: case literalsBlockCompressed:
@@ -481,7 +481,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
if len(literals) != litRegenSize { if len(literals) != litRegenSize {
return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
} }
if debug { if debugDecoder {
printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
} }
} }
@@ -532,12 +532,12 @@ func (b *blockDec) decodeCompressed(hist *history) error {
br := byteReader{b: in, off: 0} br := byteReader{b: in, off: 0}
compMode := br.Uint8() compMode := br.Uint8()
br.advance(1) br.advance(1)
if debug { if debugDecoder {
printf("Compression modes: 0b%b", compMode) printf("Compression modes: 0b%b", compMode)
} }
for i := uint(0); i < 3; i++ { for i := uint(0); i < 3; i++ {
mode := seqCompMode((compMode >> (6 - i*2)) & 3) mode := seqCompMode((compMode >> (6 - i*2)) & 3)
if debug { if debugDecoder {
println("Table", tableIndex(i), "is", mode) println("Table", tableIndex(i), "is", mode)
} }
var seq *sequenceDec var seq *sequenceDec
@@ -568,7 +568,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
} }
dec.setRLE(symb) dec.setRLE(symb)
seq.fse = dec seq.fse = dec
if debug { if debugDecoder {
printf("RLE set to %+v, code: %v", symb, v) printf("RLE set to %+v, code: %v", symb, v)
} }
case compModeFSE: case compModeFSE:
@@ -584,7 +584,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
println("Transform table error:", err) println("Transform table error:", err)
return err return err
} }
if debug { if debugDecoder {
println("Read table ok", "symbolLen:", dec.symbolLen) println("Read table ok", "symbolLen:", dec.symbolLen)
} }
seq.fse = dec seq.fse = dec
@@ -652,7 +652,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
if huff != nil { if huff != nil {
hist.huffTree = huff hist.huffTree = huff
} }
if debug { if debugDecoder {
println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.")
} }
@@ -669,7 +669,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
if err != nil { if err != nil {
return err return err
} }
if debug { if debugDecoder {
println("History merged ok") println("History merged ok")
} }
br := &bitReader{} br := &bitReader{}
@@ -728,7 +728,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
} }
hist.append(b.dst) hist.append(b.dst)
hist.recentOffsets = seqs.prevOffset hist.recentOffsets = seqs.prevOffset
if debug { if debugDecoder {
println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.")
} }

View file

@@ -156,7 +156,7 @@ func (h *literalsHeader) setSize(regenLen int) {
switch { switch {
case inBits < 5: case inBits < 5:
lh |= (uint64(regenLen) << 3) | (1 << 60) lh |= (uint64(regenLen) << 3) | (1 << 60)
if debug { if debugEncoder {
got := int(lh>>3) & 0xff got := int(lh>>3) & 0xff
if got != regenLen { if got != regenLen {
panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)"))
@@ -184,7 +184,7 @@ func (h *literalsHeader) setSizes(compLen, inLen int, single bool) {
lh |= 1 << 2 lh |= 1 << 2
} }
lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60)
if debug { if debugEncoder {
const mmask = (1 << 24) - 1 const mmask = (1 << 24) - 1
n := (lh >> 4) & mmask n := (lh >> 4) & mmask
if int(n&1023) != inLen { if int(n&1023) != inLen {
@@ -312,7 +312,7 @@ func (b *blockEnc) encodeRaw(a []byte) {
bh.setType(blockTypeRaw) bh.setType(blockTypeRaw)
b.output = bh.appendTo(b.output[:0]) b.output = bh.appendTo(b.output[:0])
b.output = append(b.output, a...) b.output = append(b.output, a...)
if debug { if debugEncoder {
println("Adding RAW block, length", len(a), "last:", b.last) println("Adding RAW block, length", len(a), "last:", b.last)
} }
} }
@@ -325,7 +325,7 @@ func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
bh.setType(blockTypeRaw) bh.setType(blockTypeRaw)
dst = bh.appendTo(dst) dst = bh.appendTo(dst)
dst = append(dst, src...) dst = append(dst, src...)
if debug { if debugEncoder {
println("Adding RAW block, length", len(src), "last:", b.last) println("Adding RAW block, length", len(src), "last:", b.last)
} }
return dst return dst
@@ -339,7 +339,7 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
// Don't compress extremely small blocks // Don't compress extremely small blocks
if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw {
if debug { if debugEncoder {
println("Adding RAW block, length", len(lits), "last:", b.last) println("Adding RAW block, length", len(lits), "last:", b.last)
} }
bh.setType(blockTypeRaw) bh.setType(blockTypeRaw)
@@ -371,7 +371,7 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
switch err { switch err {
case huff0.ErrIncompressible: case huff0.ErrIncompressible:
if debug { if debugEncoder {
println("Adding RAW block, length", len(lits), "last:", b.last) println("Adding RAW block, length", len(lits), "last:", b.last)
} }
bh.setType(blockTypeRaw) bh.setType(blockTypeRaw)
@@ -379,7 +379,7 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
b.output = append(b.output, lits...) b.output = append(b.output, lits...)
return nil return nil
case huff0.ErrUseRLE: case huff0.ErrUseRLE:
if debug { if debugEncoder {
println("Adding RLE block, length", len(lits)) println("Adding RLE block, length", len(lits))
} }
bh.setType(blockTypeRLE) bh.setType(blockTypeRLE)
@@ -396,12 +396,12 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
bh.setType(blockTypeCompressed) bh.setType(blockTypeCompressed)
var lh literalsHeader var lh literalsHeader
if reUsed { if reUsed {
if debug { if debugEncoder {
println("Reused tree, compressed to", len(out)) println("Reused tree, compressed to", len(out))
} }
lh.setType(literalsBlockTreeless) lh.setType(literalsBlockTreeless)
} else { } else {
if debug { if debugEncoder {
println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable))
} }
lh.setType(literalsBlockCompressed) lh.setType(literalsBlockCompressed)
@@ -517,7 +517,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
lh.setSize(len(b.literals)) lh.setSize(len(b.literals))
b.output = lh.appendTo(b.output) b.output = lh.appendTo(b.output)
b.output = append(b.output, b.literals...) b.output = append(b.output, b.literals...)
if debug { if debugEncoder {
println("Adding literals RAW, length", len(b.literals)) println("Adding literals RAW, length", len(b.literals))
} }
case huff0.ErrUseRLE: case huff0.ErrUseRLE:
@@ -525,22 +525,22 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
lh.setSize(len(b.literals)) lh.setSize(len(b.literals))
b.output = lh.appendTo(b.output) b.output = lh.appendTo(b.output)
b.output = append(b.output, b.literals[0]) b.output = append(b.output, b.literals[0])
if debug { if debugEncoder {
println("Adding literals RLE") println("Adding literals RLE")
} }
case nil: case nil:
// Compressed litLen... // Compressed litLen...
if reUsed { if reUsed {
if debug { if debugEncoder {
println("reused tree") println("reused tree")
} }
lh.setType(literalsBlockTreeless) lh.setType(literalsBlockTreeless)
} else { } else {
if debug { if debugEncoder {
println("new tree, size:", len(b.litEnc.OutTable)) println("new tree, size:", len(b.litEnc.OutTable))
} }
lh.setType(literalsBlockCompressed) lh.setType(literalsBlockCompressed)
if debug { if debugEncoder {
_, _, err := huff0.ReadTable(out, nil) _, _, err := huff0.ReadTable(out, nil)
if err != nil { if err != nil {
panic(err) panic(err)
@@ -548,18 +548,18 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
} }
} }
lh.setSizes(len(out), len(b.literals), single) lh.setSizes(len(out), len(b.literals), single)
if debug { if debugEncoder {
printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) printf("Compressed %d literals to %d bytes", len(b.literals), len(out))
println("Adding literal header:", lh) println("Adding literal header:", lh)
} }
b.output = lh.appendTo(b.output) b.output = lh.appendTo(b.output)
b.output = append(b.output, out...) b.output = append(b.output, out...)
b.litEnc.Reuse = huff0.ReusePolicyAllow b.litEnc.Reuse = huff0.ReusePolicyAllow
if debug { if debugEncoder {
println("Adding literals compressed") println("Adding literals compressed")
} }
default: default:
if debug { if debugEncoder {
println("Adding literals ERROR:", err) println("Adding literals ERROR:", err)
} }
return err return err
@@ -577,7 +577,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
n := len(b.sequences) - 0x7f00 n := len(b.sequences) - 0x7f00
b.output = append(b.output, 255, uint8(n), uint8(n>>8)) b.output = append(b.output, 255, uint8(n), uint8(n>>8))
} }
if debug { if debugEncoder {
println("Encoding", len(b.sequences), "sequences") println("Encoding", len(b.sequences), "sequences")
} }
b.genCodes() b.genCodes()
@@ -611,17 +611,17 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
nSize = nSize + (nSize+2*8*16)>>4 nSize = nSize + (nSize+2*8*16)>>4
switch { switch {
case predefSize <= prevSize && predefSize <= nSize || forcePreDef: case predefSize <= prevSize && predefSize <= nSize || forcePreDef:
if debug { if debugEncoder {
println("Using predefined", predefSize>>3, "<=", nSize>>3) println("Using predefined", predefSize>>3, "<=", nSize>>3)
} }
return preDef, compModePredefined return preDef, compModePredefined
case prevSize <= nSize: case prevSize <= nSize:
if debug { if debugEncoder {
println("Using previous", prevSize>>3, "<=", nSize>>3) println("Using previous", prevSize>>3, "<=", nSize>>3)
} }
return prev, compModeRepeat return prev, compModeRepeat
default: default:
if debug { if debugEncoder {
println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes")
println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen])
} }
@@ -634,7 +634,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
if llEnc.useRLE { if llEnc.useRLE {
mode |= uint8(compModeRLE) << 6 mode |= uint8(compModeRLE) << 6
llEnc.setRLE(b.sequences[0].llCode) llEnc.setRLE(b.sequences[0].llCode)
if debug { if debugEncoder {
println("llEnc.useRLE") println("llEnc.useRLE")
} }
} else { } else {
@@ -645,7 +645,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
if ofEnc.useRLE { if ofEnc.useRLE {
mode |= uint8(compModeRLE) << 4 mode |= uint8(compModeRLE) << 4
ofEnc.setRLE(b.sequences[0].ofCode) ofEnc.setRLE(b.sequences[0].ofCode)
if debug { if debugEncoder {
println("ofEnc.useRLE") println("ofEnc.useRLE")
} }
} else { } else {
@@ -657,7 +657,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
if mlEnc.useRLE { if mlEnc.useRLE {
mode |= uint8(compModeRLE) << 2 mode |= uint8(compModeRLE) << 2
mlEnc.setRLE(b.sequences[0].mlCode) mlEnc.setRLE(b.sequences[0].mlCode)
if debug { if debugEncoder {
println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen)
} }
} else { } else {
@@ -666,7 +666,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
mode |= uint8(m) << 2 mode |= uint8(m) << 2
} }
b.output = append(b.output, mode) b.output = append(b.output, mode)
if debug { if debugEncoder {
printf("Compression modes: 0b%b", mode) printf("Compression modes: 0b%b", mode)
} }
b.output, err = llEnc.writeCount(b.output) b.output, err = llEnc.writeCount(b.output)
@@ -786,7 +786,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
// Size is output minus block header. // Size is output minus block header.
bh.setSize(uint32(len(b.output)-bhOffset) - 3) bh.setSize(uint32(len(b.output)-bhOffset) - 3)
if debug { if debugEncoder {
println("Rewriting block header", bh) println("Rewriting block header", bh)
} }
_ = bh.appendTo(b.output[bhOffset:bhOffset]) _ = bh.appendTo(b.output[bhOffset:bhOffset])

View file

@@ -91,7 +91,7 @@ func (r *readerWrapper) readSmall(n int) ([]byte, error) {
if err == io.EOF { if err == io.EOF {
return nil, io.ErrUnexpectedEOF return nil, io.ErrUnexpectedEOF
} }
if debug { if debugDecoder {
println("readSmall: got", n2, "want", n, "err", err) println("readSmall: got", n2, "want", n, "err", err)
} }
return nil, err return nil, err

View file

@@ -113,9 +113,6 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
// Returns the number of bytes written and any error that occurred. // Returns the number of bytes written and any error that occurred.
// When the stream is done, io.EOF will be returned. // When the stream is done, io.EOF will be returned.
func (d *Decoder) Read(p []byte) (int, error) { func (d *Decoder) Read(p []byte) (int, error) {
if d.stream == nil {
return 0, ErrDecoderNilInput
}
var n int var n int
for { for {
if len(d.current.b) > 0 { if len(d.current.b) > 0 {
@@ -138,7 +135,7 @@ func (d *Decoder) Read(p []byte) (int, error) {
} }
} }
if len(d.current.b) > 0 { if len(d.current.b) > 0 {
if debug { if debugDecoder {
println("returning", n, "still bytes left:", len(d.current.b)) println("returning", n, "still bytes left:", len(d.current.b))
} }
// Only return error at end of block // Only return error at end of block
@@ -147,7 +144,7 @@ func (d *Decoder) Read(p []byte) (int, error) {
if d.current.err != nil { if d.current.err != nil {
d.drainOutput() d.drainOutput()
} }
if debug { if debugDecoder {
println("returning", n, d.current.err, len(d.decoders)) println("returning", n, d.current.err, len(d.decoders))
} }
return n, d.current.err return n, d.current.err
@@ -167,20 +164,17 @@ func (d *Decoder) Reset(r io.Reader) error {
if r == nil { if r == nil {
d.current.err = ErrDecoderNilInput d.current.err = ErrDecoderNilInput
if len(d.current.b) > 0 {
d.current.b = d.current.b[:0]
}
d.current.flushed = true d.current.flushed = true
return nil return nil
} }
if d.stream == nil { // If bytes buffer and < 5MB, do sync decoding anyway.
d.stream = make(chan decodeStream, 1) if bb, ok := r.(byter); ok && bb.Len() < 5<<20 {
d.streamWg.Add(1)
go d.startStreamDecoder(d.stream)
}
// If bytes buffer and < 1MB, do sync decoding anyway.
if bb, ok := r.(byter); ok && bb.Len() < 1<<20 {
bb2 := bb bb2 := bb
if debug { if debugDecoder {
println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
} }
b := bb2.Bytes() b := bb2.Bytes()
@@ -196,12 +190,18 @@ func (d *Decoder) Reset(r io.Reader) error {
d.current.b = dst d.current.b = dst
d.current.err = err d.current.err = err
d.current.flushed = true d.current.flushed = true
if debug { if debugDecoder {
println("sync decode to", len(dst), "bytes, err:", err) println("sync decode to", len(dst), "bytes, err:", err)
} }
return nil return nil
} }
if d.stream == nil {
d.stream = make(chan decodeStream, 1)
d.streamWg.Add(1)
go d.startStreamDecoder(d.stream)
}
// Remove current block. // Remove current block.
d.current.decodeOutput = decodeOutput{} d.current.decodeOutput = decodeOutput{}
d.current.err = nil d.current.err = nil
@@ -225,7 +225,7 @@ func (d *Decoder) drainOutput() {
d.current.cancel = nil d.current.cancel = nil
} }
if d.current.d != nil { if d.current.d != nil {
if debug { if debugDecoder {
printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders))
} }
d.decoders <- d.current.d d.decoders <- d.current.d
@@ -238,7 +238,7 @@ func (d *Decoder) drainOutput() {
} }
for v := range d.current.output { for v := range d.current.output {
if v.d != nil { if v.d != nil {
if debug { if debugDecoder {
printf("re-adding decoder %p", v.d) printf("re-adding decoder %p", v.d)
} }
d.decoders <- v.d d.decoders <- v.d
@@ -255,9 +255,6 @@ func (d *Decoder) drainOutput() {
// The return value n is the number of bytes written. // The return value n is the number of bytes written.
// Any error encountered during the write is also returned. // Any error encountered during the write is also returned.
func (d *Decoder) WriteTo(w io.Writer) (int64, error) { func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
if d.stream == nil {
return 0, ErrDecoderNilInput
}
var n int64 var n int64
for { for {
if len(d.current.b) > 0 { if len(d.current.b) > 0 {
@@ -297,7 +294,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
block := <-d.decoders block := <-d.decoders
frame := block.localFrame frame := block.localFrame
defer func() { defer func() {
if debug { if debugDecoder {
printf("re-adding decoder: %p", block) printf("re-adding decoder: %p", block)
} }
frame.rawInput = nil frame.rawInput = nil
@@ -310,7 +307,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
frame.history.reset() frame.history.reset()
err := frame.reset(&frame.bBuf) err := frame.reset(&frame.bBuf)
if err == io.EOF { if err == io.EOF {
if debug { if debugDecoder {
println("frame reset return EOF") println("frame reset return EOF")
} }
return dst, nil return dst, nil
@@ -355,7 +352,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
return dst, err return dst, err
} }
if len(frame.bBuf) == 0 { if len(frame.bBuf) == 0 {
if debug { if debugDecoder {
println("frame dbuf empty") println("frame dbuf empty")
} }
break break
@@ -371,7 +368,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
// if no data was available without blocking. // if no data was available without blocking.
func (d *Decoder) nextBlock(blocking bool) (ok bool) { func (d *Decoder) nextBlock(blocking bool) (ok bool) {
if d.current.d != nil { if d.current.d != nil {
if debug { if debugDecoder {
printf("re-adding current decoder %p", d.current.d) printf("re-adding current decoder %p", d.current.d)
} }
d.decoders <- d.current.d d.decoders <- d.current.d
@@ -391,7 +388,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
return false return false
} }
} }
if debug { if debugDecoder {
println("got", len(d.current.b), "bytes, error:", d.current.err) println("got", len(d.current.b), "bytes, error:", d.current.err)
} }
return true return true
@@ -485,7 +482,7 @@ func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
defer d.streamWg.Done() defer d.streamWg.Done()
frame := newFrameDec(d.o) frame := newFrameDec(d.o)
for stream := range inStream { for stream := range inStream {
if debug { if debugDecoder {
println("got new stream") println("got new stream")
} }
br := readerWrapper{r: stream.r} br := readerWrapper{r: stream.r}
@@ -493,7 +490,7 @@ func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
for { for {
frame.history.reset() frame.history.reset()
err := frame.reset(&br) err := frame.reset(&br)
if debug && err != nil { if debugDecoder && err != nil {
println("Frame decoder returned", err) println("Frame decoder returned", err)
} }
if err == nil && frame.DictionaryID != nil { if err == nil && frame.DictionaryID != nil {
@@ -510,7 +507,7 @@ func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
} }
break break
} }
if debug { if debugDecoder {
println("starting frame decoder") println("starting frame decoder")
} }

View file

@@ -82,7 +82,7 @@ func loadDict(b []byte) (*dict, error) {
println("Transform table error:", err) println("Transform table error:", err)
return err return err
} }
if debug { if debugDecoder || debugEncoder {
println("Read table ok", "symbolLen:", dec.symbolLen) println("Read table ok", "symbolLen:", dec.symbolLen)
} }
// Set decoders as predefined so they aren't reused. // Set decoders as predefined so they aren't reused.

View file

@@ -132,7 +132,7 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
} }
_ = addLiterals _ = addLiterals
if debug { if debugEncoder {
println("recent offsets:", blk.recentOffsets) println("recent offsets:", blk.recentOffsets)
} }
@@ -274,7 +274,7 @@ encodeLoop:
nextEmit = s nextEmit = s
if s >= sLimit { if s >= sLimit {
if debug { if debugEncoder {
println("repeat ended", s, best.length) println("repeat ended", s, best.length)
} }
@@ -412,7 +412,7 @@ encodeLoop:
blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2) blk.recentOffsets[1] = uint32(offset2)
blk.recentOffsets[2] = uint32(offset3) blk.recentOffsets[2] = uint32(offset3)
if debug { if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
} }
} }

View file

@@ -138,7 +138,7 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...) blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit) s.litLen = uint32(until - nextEmit)
} }
if debug { if debugEncoder {
println("recent offsets:", blk.recentOffsets) println("recent offsets:", blk.recentOffsets)
} }
@@ -204,7 +204,7 @@ encodeLoop:
nextEmit = s nextEmit = s
if s >= sLimit { if s >= sLimit {
if debug { if debugEncoder {
println("repeat ended", s, lenght) println("repeat ended", s, lenght)
} }
@@ -264,7 +264,7 @@ encodeLoop:
s += lenght + repOff2 s += lenght + repOff2
nextEmit = s nextEmit = s
if s >= sLimit { if s >= sLimit {
if debug { if debugEncoder {
println("repeat ended", s, lenght) println("repeat ended", s, lenght)
} }
@@ -553,7 +553,7 @@ encodeLoop:
} }
blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2) blk.recentOffsets[1] = uint32(offset2)
if debug { if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
} }
} }
@@ -656,7 +656,7 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...) blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit) s.litLen = uint32(until - nextEmit)
} }
if debug { if debugEncoder {
println("recent offsets:", blk.recentOffsets) println("recent offsets:", blk.recentOffsets)
} }
@@ -724,7 +724,7 @@ encodeLoop:
nextEmit = s nextEmit = s
if s >= sLimit { if s >= sLimit {
if debug { if debugEncoder {
println("repeat ended", s, lenght) println("repeat ended", s, lenght)
} }
@@ -787,7 +787,7 @@ encodeLoop:
s += lenght + repOff2 s += lenght + repOff2
nextEmit = s nextEmit = s
if s >= sLimit { if s >= sLimit {
if debug { if debugEncoder {
println("repeat ended", s, lenght) println("repeat ended", s, lenght)
} }
@@ -1084,7 +1084,7 @@ encodeLoop:
} }
blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2) blk.recentOffsets[1] = uint32(offset2)
if debug { if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
} }
} }

View file

@@ -109,7 +109,7 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...) blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit) s.litLen = uint32(until - nextEmit)
} }
if debug { if debugEncoder {
println("recent offsets:", blk.recentOffsets) println("recent offsets:", blk.recentOffsets)
} }
@@ -170,7 +170,7 @@ encodeLoop:
s += lenght + repOff s += lenght + repOff
nextEmit = s nextEmit = s
if s >= sLimit { if s >= sLimit {
if debug { if debugEncoder {
println("repeat ended", s, lenght) println("repeat ended", s, lenght)
} }
@@ -368,7 +368,7 @@ encodeLoop:
} }
blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2) blk.recentOffsets[1] = uint32(offset2)
if debug { if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
} }
} }
@ -427,7 +427,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...) blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit) s.litLen = uint32(until - nextEmit)
} }
if debug { if debugEncoder {
println("recent offsets:", blk.recentOffsets) println("recent offsets:", blk.recentOffsets)
} }
@ -483,7 +483,7 @@ encodeLoop:
s += length + repOff s += length + repOff
nextEmit = s nextEmit = s
if s >= sLimit { if s >= sLimit {
if debug { if debugEncoder {
println("repeat ended", s, length) println("repeat ended", s, length)
} }
@ -677,7 +677,7 @@ encodeLoop:
blk.literals = append(blk.literals, src[nextEmit:]...) blk.literals = append(blk.literals, src[nextEmit:]...)
blk.extraLits = len(src) - int(nextEmit) blk.extraLits = len(src) - int(nextEmit)
} }
if debug { if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
} }
@ -767,7 +767,7 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...) blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit) s.litLen = uint32(until - nextEmit)
} }
if debug { if debugEncoder {
println("recent offsets:", blk.recentOffsets) println("recent offsets:", blk.recentOffsets)
} }
@ -830,7 +830,7 @@ encodeLoop:
s += lenght + repOff s += lenght + repOff
nextEmit = s nextEmit = s
if s >= sLimit { if s >= sLimit {
if debug { if debugEncoder {
println("repeat ended", s, lenght) println("repeat ended", s, lenght)
} }
@ -1039,7 +1039,7 @@ encodeLoop:
} }
blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2) blk.recentOffsets[1] = uint32(offset2)
if debug { if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
} }
// If we encoded more than 64K mark all dirty. // If we encoded more than 64K mark all dirty.

View file

@ -103,7 +103,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...) blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit) s.litLen = uint32(until - nextEmit)
} }
if debug { if debugEncoder {
println("recent offsets:", blk.recentOffsets) println("recent offsets:", blk.recentOffsets)
} }
@ -178,7 +178,7 @@ encodeLoop:
s += length + 2 s += length + 2
nextEmit = s nextEmit = s
if s >= sLimit { if s >= sLimit {
if debug { if debugEncoder {
println("repeat ended", s, length) println("repeat ended", s, length)
} }
@ -330,7 +330,7 @@ encodeLoop:
} }
blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2) blk.recentOffsets[1] = uint32(offset2)
if debug { if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
} }
} }
@ -343,7 +343,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
inputMargin = 8 inputMargin = 8
minNonLiteralBlockSize = 1 + 1 + inputMargin minNonLiteralBlockSize = 1 + 1 + inputMargin
) )
if debug { if debugEncoder {
if len(src) > maxBlockSize { if len(src) > maxBlockSize {
panic("src too big") panic("src too big")
} }
@ -391,7 +391,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...) blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit) s.litLen = uint32(until - nextEmit)
} }
if debug { if debugEncoder {
println("recent offsets:", blk.recentOffsets) println("recent offsets:", blk.recentOffsets)
} }
@ -462,7 +462,7 @@ encodeLoop:
s += length + 2 s += length + 2
nextEmit = s nextEmit = s
if s >= sLimit { if s >= sLimit {
if debug { if debugEncoder {
println("repeat ended", s, length) println("repeat ended", s, length)
} }
@ -616,7 +616,7 @@ encodeLoop:
blk.literals = append(blk.literals, src[nextEmit:]...) blk.literals = append(blk.literals, src[nextEmit:]...)
blk.extraLits = len(src) - int(nextEmit) blk.extraLits = len(src) - int(nextEmit)
} }
if debug { if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
} }
// We do not store history, so we must offset e.cur to avoid false matches for next user. // We do not store history, so we must offset e.cur to avoid false matches for next user.
@ -696,7 +696,7 @@ func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...) blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit) s.litLen = uint32(until - nextEmit)
} }
if debug { if debugEncoder {
println("recent offsets:", blk.recentOffsets) println("recent offsets:", blk.recentOffsets)
} }
@ -773,7 +773,7 @@ encodeLoop:
s += length + 2 s += length + 2
nextEmit = s nextEmit = s
if s >= sLimit { if s >= sLimit {
if debug { if debugEncoder {
println("repeat ended", s, length) println("repeat ended", s, length)
} }
@ -926,7 +926,7 @@ encodeLoop:
} }
blk.recentOffsets[0] = uint32(offset1) blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2) blk.recentOffsets[1] = uint32(offset2)
if debug { if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
} }
} }

View file

@ -245,7 +245,7 @@ func (e *Encoder) nextBlock(final bool) error {
s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
s.wg.Add(1) s.wg.Add(1)
go func(src []byte) { go func(src []byte) {
if debug { if debugEncoder {
println("Adding block,", len(src), "bytes, final:", final) println("Adding block,", len(src), "bytes, final:", final)
} }
defer func() { defer func() {
@ -290,7 +290,7 @@ func (e *Encoder) nextBlock(final bool) error {
} }
switch err { switch err {
case errIncompressible: case errIncompressible:
if debug { if debugEncoder {
println("Storing incompressible block as raw") println("Storing incompressible block as raw")
} }
blk.encodeRaw(src) blk.encodeRaw(src)
@ -313,7 +313,7 @@ func (e *Encoder) nextBlock(final bool) error {
// //
// The Copy function uses ReaderFrom if available. // The Copy function uses ReaderFrom if available.
func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) {
if debug { if debugEncoder {
println("Using ReadFrom") println("Using ReadFrom")
} }
@ -336,20 +336,20 @@ func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) {
switch err { switch err {
case io.EOF: case io.EOF:
e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] e.state.filling = e.state.filling[:len(e.state.filling)-len(src)]
if debug { if debugEncoder {
println("ReadFrom: got EOF final block:", len(e.state.filling)) println("ReadFrom: got EOF final block:", len(e.state.filling))
} }
return n, nil return n, nil
case nil: case nil:
default: default:
if debug { if debugEncoder {
println("ReadFrom: got error:", err) println("ReadFrom: got error:", err)
} }
e.state.err = err e.state.err = err
return n, err return n, err
} }
if len(src) > 0 { if len(src) > 0 {
if debug { if debugEncoder {
println("ReadFrom: got space left in source:", len(src)) println("ReadFrom: got space left in source:", len(src))
} }
continue continue
@ -512,7 +512,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
switch err { switch err {
case errIncompressible: case errIncompressible:
if debug { if debugEncoder {
println("Storing incompressible block as raw") println("Storing incompressible block as raw")
} }
dst = blk.encodeRawTo(dst, src) dst = blk.encodeRawTo(dst, src)
@ -548,7 +548,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
switch err { switch err {
case errIncompressible: case errIncompressible:
if debug { if debugEncoder {
println("Storing incompressible block as raw") println("Storing incompressible block as raw")
} }
dst = blk.encodeRawTo(dst, todo) dst = blk.encodeRawTo(dst, todo)

View file

@ -78,20 +78,33 @@ func newFrameDec(o decoderOptions) *frameDec {
func (d *frameDec) reset(br byteBuffer) error { func (d *frameDec) reset(br byteBuffer) error {
d.HasCheckSum = false d.HasCheckSum = false
d.WindowSize = 0 d.WindowSize = 0
var b []byte var signature [4]byte
for { for {
var err error var err error
b, err = br.readSmall(4) // Check if we can read more...
b, err := br.readSmall(1)
switch err { switch err {
case io.EOF, io.ErrUnexpectedEOF: case io.EOF, io.ErrUnexpectedEOF:
return io.EOF return io.EOF
default: default:
return err return err
case nil: case nil:
signature[0] = b[0]
} }
if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { // Read the rest, don't allow io.ErrUnexpectedEOF
if debug { b, err = br.readSmall(3)
println("Not skippable", hex.EncodeToString(b), hex.EncodeToString(skippableFrameMagic)) switch err {
case io.EOF:
return io.EOF
default:
return err
case nil:
copy(signature[1:], b)
}
if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 {
if debugDecoder {
println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic))
} }
// Break if not skippable frame. // Break if not skippable frame.
break break
@ -99,28 +112,34 @@ func (d *frameDec) reset(br byteBuffer) error {
// Read size to skip // Read size to skip
b, err = br.readSmall(4) b, err = br.readSmall(4)
if err != nil { if err != nil {
println("Reading Frame Size", err) if debugDecoder {
println("Reading Frame Size", err)
}
return err return err
} }
n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
println("Skipping frame with", n, "bytes.") println("Skipping frame with", n, "bytes.")
err = br.skipN(int(n)) err = br.skipN(int(n))
if err != nil { if err != nil {
if debug { if debugDecoder {
println("Reading discarded frame", err) println("Reading discarded frame", err)
} }
return err return err
} }
} }
if !bytes.Equal(b, frameMagic) { if !bytes.Equal(signature[:], frameMagic) {
println("Got magic numbers: ", b, "want:", frameMagic) if debugDecoder {
println("Got magic numbers: ", signature, "want:", frameMagic)
}
return ErrMagicMismatch return ErrMagicMismatch
} }
// Read Frame_Header_Descriptor // Read Frame_Header_Descriptor
fhd, err := br.readByte() fhd, err := br.readByte()
if err != nil { if err != nil {
println("Reading Frame_Header_Descriptor", err) if debugDecoder {
println("Reading Frame_Header_Descriptor", err)
}
return err return err
} }
d.SingleSegment = fhd&(1<<5) != 0 d.SingleSegment = fhd&(1<<5) != 0
@ -135,7 +154,9 @@ func (d *frameDec) reset(br byteBuffer) error {
if !d.SingleSegment { if !d.SingleSegment {
wd, err := br.readByte() wd, err := br.readByte()
if err != nil { if err != nil {
println("Reading Window_Descriptor", err) if debugDecoder {
println("Reading Window_Descriptor", err)
}
return err return err
} }
printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
@ -153,7 +174,7 @@ func (d *frameDec) reset(br byteBuffer) error {
size = 4 size = 4
} }
b, err = br.readSmall(int(size)) b, err := br.readSmall(int(size))
if err != nil { if err != nil {
println("Reading Dictionary_ID", err) println("Reading Dictionary_ID", err)
return err return err
@ -167,7 +188,7 @@ func (d *frameDec) reset(br byteBuffer) error {
case 4: case 4:
id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
} }
if debug { if debugDecoder {
println("Dict size", size, "ID:", id) println("Dict size", size, "ID:", id)
} }
if id > 0 { if id > 0 {
@ -191,7 +212,7 @@ func (d *frameDec) reset(br byteBuffer) error {
} }
d.FrameContentSize = 0 d.FrameContentSize = 0
if fcsSize > 0 { if fcsSize > 0 {
b, err = br.readSmall(fcsSize) b, err := br.readSmall(fcsSize)
if err != nil { if err != nil {
println("Reading Frame content", err) println("Reading Frame content", err)
return err return err
@ -209,7 +230,7 @@ func (d *frameDec) reset(br byteBuffer) error {
d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24)
d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
} }
if debug { if debugDecoder {
println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize)
} }
} }
@ -252,7 +273,7 @@ func (d *frameDec) reset(br byteBuffer) error {
// next will start decoding the next block from stream. // next will start decoding the next block from stream.
func (d *frameDec) next(block *blockDec) error { func (d *frameDec) next(block *blockDec) error {
if debug { if debugDecoder {
printf("decoding new block %p:%p", block, block.data) printf("decoding new block %p:%p", block, block.data)
} }
err := block.reset(d.rawInput, d.WindowSize) err := block.reset(d.rawInput, d.WindowSize)
@ -263,7 +284,7 @@ func (d *frameDec) next(block *blockDec) error {
return err return err
} }
block.input <- struct{}{} block.input <- struct{}{}
if debug { if debugDecoder {
println("next block:", block) println("next block:", block)
} }
d.asyncRunningMu.Lock() d.asyncRunningMu.Lock()
@ -318,12 +339,12 @@ func (d *frameDec) checkCRC() error {
} }
if !bytes.Equal(tmp[:], want) { if !bytes.Equal(tmp[:], want) {
if debug { if debugDecoder {
println("CRC Check Failed:", tmp[:], "!=", want) println("CRC Check Failed:", tmp[:], "!=", want)
} }
return ErrCRCMismatch return ErrCRCMismatch
} }
if debug { if debugDecoder {
println("CRC ok", tmp[:]) println("CRC ok", tmp[:])
} }
return nil return nil
@ -344,7 +365,7 @@ func (d *frameDec) initAsync() {
if cap(d.decoding) < d.o.concurrent { if cap(d.decoding) < d.o.concurrent {
d.decoding = make(chan *blockDec, d.o.concurrent) d.decoding = make(chan *blockDec, d.o.concurrent)
} }
if debug { if debugDecoder {
h := d.history h := d.history
printf("history init. len: %d, cap: %d", len(h.b), cap(h.b)) printf("history init. len: %d, cap: %d", len(h.b), cap(h.b))
} }
@ -392,7 +413,7 @@ func (d *frameDec) startDecoder(output chan decodeOutput) {
output <- r output <- r
return return
} }
if debug { if debugDecoder {
println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) println("got result, from ", d.offset, "to", d.offset+int64(len(r.b)))
d.offset += int64(len(r.b)) d.offset += int64(len(r.b))
} }
@ -400,7 +421,7 @@ func (d *frameDec) startDecoder(output chan decodeOutput) {
// Send history to next block // Send history to next block
select { select {
case next = <-d.decoding: case next = <-d.decoding:
if debug { if debugDecoder {
println("Sending ", len(d.history.b), "bytes as history") println("Sending ", len(d.history.b), "bytes as history")
} }
next.history <- &d.history next.history <- &d.history
@ -438,7 +459,7 @@ func (d *frameDec) startDecoder(output chan decodeOutput) {
output <- r output <- r
if next == nil { if next == nil {
// There was no decoder available, we wait for one now that we have sent to the writer. // There was no decoder available, we wait for one now that we have sent to the writer.
if debug { if debugDecoder {
println("Sending ", len(d.history.b), " bytes as history") println("Sending ", len(d.history.b), " bytes as history")
} }
next = <-d.decoding next = <-d.decoding
@ -462,7 +483,7 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if err != nil { if err != nil {
break break
} }
if debug { if debugDecoder {
println("next block:", dec) println("next block:", dec)
} }
err = dec.decodeBuf(&d.history) err = dec.decodeBuf(&d.history)

View file

@ -229,7 +229,7 @@ func (s *fseEncoder) setRLE(val byte) {
deltaFindState: 0, deltaFindState: 0,
deltaNbBits: 0, deltaNbBits: 0,
} }
if debug { if debugEncoder {
println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val])
} }
s.rleVal = val s.rleVal = val

View file

@ -203,7 +203,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
written += int64(n) written += int64(n)
continue continue
case chunkTypeUncompressedData: case chunkTypeUncompressedData:
if debug { if debugEncoder {
println("Uncompressed, chunklen", chunkLen) println("Uncompressed, chunklen", chunkLen)
} }
// Section 4.3. Uncompressed data (chunk type 0x01). // Section 4.3. Uncompressed data (chunk type 0x01).
@ -246,7 +246,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
continue continue
case chunkTypeStreamIdentifier: case chunkTypeStreamIdentifier:
if debug { if debugEncoder {
println("stream id", chunkLen, len(snappyMagicBody)) println("stream id", chunkLen, len(snappyMagicBody))
} }
// Section 4.1. Stream identifier (chunk type 0xff). // Section 4.1. Stream identifier (chunk type 0xff).

View file

@ -15,6 +15,12 @@ import (
// enable debug printing // enable debug printing
const debug = false const debug = false
// enable encoding debug printing
const debugEncoder = debug
// enable decoding debug printing
const debugDecoder = debug
// Enable extra assertions. // Enable extra assertions.
const debugAsserts = debug || false const debugAsserts = debug || false
@ -82,13 +88,13 @@ var (
) )
func println(a ...interface{}) { func println(a ...interface{}) {
if debug { if debug || debugDecoder || debugEncoder {
log.Println(a...) log.Println(a...)
} }
} }
func printf(format string, a ...interface{}) { func printf(format string, a ...interface{}) {
if debug { if debug || debugDecoder || debugEncoder {
log.Printf(format, a...) log.Printf(format, a...)
} }
} }

2
vendor/modules.txt vendored
View file

@ -128,7 +128,7 @@ github.com/jmespath/go-jmespath
github.com/jstemmer/go-junit-report github.com/jstemmer/go-junit-report
github.com/jstemmer/go-junit-report/formatter github.com/jstemmer/go-junit-report/formatter
github.com/jstemmer/go-junit-report/parser github.com/jstemmer/go-junit-report/parser
# github.com/klauspost/compress v1.12.3 # github.com/klauspost/compress v1.13.0
## explicit ## explicit
github.com/klauspost/compress/flate github.com/klauspost/compress/flate
github.com/klauspost/compress/fse github.com/klauspost/compress/fse