lib: dump compressed block contents on error during decompression

This should help in detecting the root cause of https://github.com/facebook/zstd/issues/2222
Aliaksandr Valialkin 2020-08-15 14:44:29 +03:00
parent 528e25bdde
commit 00b1659dde
5 changed files with 12 additions and 6 deletions

@@ -1,6 +1,8 @@
 package encoding
 
 import (
+	"fmt"
+
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding/zstd"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -22,7 +24,11 @@ func CompressZSTDLevel(dst, src []byte, compressLevel int) []byte {
 // the appended dst.
 func DecompressZSTD(dst, src []byte) ([]byte, error) {
 	decompressCalls.Inc()
-	return zstd.Decompress(dst, src)
+	b, err := zstd.Decompress(dst, src)
+	if err != nil {
+		return b, fmt.Errorf("cannot decompress zstd block with len=%d to a buffer with len=%d: %w; block data (hex): %X", len(src), len(dst), err, src)
+	}
+	return b, nil
 }
 
 var (

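The hex dump emitted by the new error makes a failing block reproducible offline: the %X output from a log line can be decoded back into the original compressed bytes and fed to the same decompressor. A minimal sketch of such a reproducer (the blockHex value and the standalone main package are placeholders, not part of this commit; only the zstd.Decompress call mirrors the code above):

package main

import (
	"encoding/hex"
	"fmt"
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding/zstd"
)

func main() {
	// Hex string copied from a "block data (hex): ..." log message;
	// this particular value is just a placeholder.
	blockHex := "28B52FFD0000DEADBEEF"

	src, err := hex.DecodeString(blockHex)
	if err != nil {
		log.Fatalf("cannot decode hex dump: %s", err)
	}

	// Re-run decompression on the captured block with the same wrapper.
	b, err := zstd.Decompress(nil, src)
	if err != nil {
		log.Fatalf("decompression still fails: %s", err)
	}
	fmt.Printf("decompressed %d bytes\n", len(b))
}
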
@@ -166,7 +166,7 @@ func unmarshalInt64Array(dst []int64, src []byte, mt MarshalType, firstValue int
 	bb := bbPool.Get()
 	bb.B, err = DecompressZSTD(bb.B[:0], src)
 	if err != nil {
-		return nil, fmt.Errorf("cannot decompress zstd data of size %d: %w; src_zstd=%X", len(src), err, src)
+		return nil, fmt.Errorf("cannot decompress zstd data: %w", err)
 	}
 	dst, err = unmarshalInt64NearestDelta(dst, bb.B, firstValue, itemsCount)
 	bbPool.Put(bb)
@@ -178,7 +178,7 @@ func unmarshalInt64Array(dst []int64, src []byte, mt MarshalType, firstValue int
 	bb := bbPool.Get()
 	bb.B, err = DecompressZSTD(bb.B[:0], src)
 	if err != nil {
-		return nil, fmt.Errorf("cannot decompress zstd data of size %d: %w; src_zstd=%X", len(src), err, src)
+		return nil, fmt.Errorf("cannot decompress zstd data: %w", err)
 	}
 	dst, err = unmarshalInt64NearestDelta2(dst, bb.B, firstValue, itemsCount)
 	bbPool.Put(bb)

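Since DecompressZSTD now wraps the underlying error with %w and already reports the block length, destination buffer length and hex contents, the callers above only add their own context instead of repeating those details. A hedged illustration of how the chain composes (the corrupt payload is arbitrary sample data, not taken from the repository):

package main

import (
	"errors"
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
)

func main() {
	// Plain text is not a valid zstd frame, so decompression must fail.
	corrupt := []byte("definitely not a zstd frame")

	_, err := encoding.DecompressZSTD(nil, corrupt)
	if err != nil {
		// Caller-level wrapping, mirroring unmarshalInt64Array above;
		// the block size and hex dump already live in the wrapped error.
		wrapped := fmt.Errorf("cannot decompress zstd data: %w", err)
		fmt.Println(wrapped)
		// errors.Unwrap walks back to the error produced by DecompressZSTD.
		fmt.Println(errors.Unwrap(wrapped) != nil) // prints "true"
	}
}
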
@@ -267,7 +267,7 @@ func (bsr *blockStreamReader) readNextBHS() error {
 	var err error
 	bsr.unpackedBuf, err = encoding.DecompressZSTD(bsr.unpackedBuf[:0], bsr.packedBuf)
 	if err != nil {
-		return fmt.Errorf("cannot decompress index block with size %d: %w", mr.indexBlockSize, err)
+		return fmt.Errorf("cannot decompress index block: %w", err)
 	}
 
 	// Unmarshal the unpacked index block into bsr.bhs.

@@ -89,7 +89,7 @@ func unmarshalMetaindexRows(dst []metaindexRow, r io.Reader) ([]metaindexRow, er
 	}
 	data, err := encoding.DecompressZSTD(nil, compressedData)
 	if err != nil {
-		return dst, fmt.Errorf("cannot decompress metaindex data with size %d bytes: %w", len(compressedData), err)
+		return dst, fmt.Errorf("cannot decompress metaindex data: %w", err)
 	}
 	dstLen := len(dst)

@@ -294,7 +294,7 @@ func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) {
 	var err error
 	ps.indexBuf, err = encoding.DecompressZSTD(ps.indexBuf[:0], ps.compressedIndexBuf)
 	if err != nil {
-		return nil, fmt.Errorf("cannot decompress index block with size %d bytes: %w", len(ps.compressedIndexBuf), err)
+		return nil, fmt.Errorf("cannot decompress index block: %w", err)
 	}
 	idxb := getIndexBlock()
 	idxb.bhs, err = unmarshalBlockHeaders(idxb.bhs[:0], ps.indexBuf, int(mr.blockHeadersCount))
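
The error format introduced in lib/encoding can also be exercised directly with a small test; the sketch below is a hypothetical test file next to DecompressZSTD, not part of this commit, and it assumes that flipping every byte of a valid compressed block breaks the zstd frame:

package encoding

import (
	"strings"
	"testing"
)

func TestDecompressZSTDErrorContainsBlockDump(t *testing.T) {
	// Compress a valid payload first, then corrupt it so the error path fires.
	compressed := CompressZSTDLevel(nil, []byte("some test payload"), 5)
	corrupted := append([]byte{}, compressed...)
	for i := range corrupted {
		corrupted[i] ^= 0xff // flip every byte to break the frame
	}

	_, err := DecompressZSTD(nil, corrupted)
	if err == nil {
		t.Fatalf("expecting an error for corrupted zstd data")
	}
	// The error is expected to carry the hex dump of the failing block.
	if !strings.Contains(err.Error(), "block data (hex):") {
		t.Fatalf("error must contain the block hex dump; got: %s", err)
	}
}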