diff --git a/lib/encoding/compress.go b/lib/encoding/compress.go
index 4739b8962..b8acf541e 100644
--- a/lib/encoding/compress.go
+++ b/lib/encoding/compress.go
@@ -1,6 +1,8 @@
 package encoding
 
 import (
+	"fmt"
+
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding/zstd"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -22,7 +24,11 @@ func CompressZSTDLevel(dst, src []byte, compressLevel int) []byte {
 // the appended dst.
 func DecompressZSTD(dst, src []byte) ([]byte, error) {
 	decompressCalls.Inc()
-	return zstd.Decompress(dst, src)
+	b, err := zstd.Decompress(dst, src)
+	if err != nil {
+		return b, fmt.Errorf("cannot decompress zstd block with len=%d to a buffer with len=%d: %w; block data (hex): %X", len(src), len(dst), err, src)
+	}
+	return b, nil
 }
 
 var (
diff --git a/lib/encoding/encoding.go b/lib/encoding/encoding.go
index 807f8eb17..01829c815 100644
--- a/lib/encoding/encoding.go
+++ b/lib/encoding/encoding.go
@@ -166,7 +166,7 @@ func unmarshalInt64Array(dst []int64, src []byte, mt MarshalType, firstValue int
 		bb := bbPool.Get()
 		bb.B, err = DecompressZSTD(bb.B[:0], src)
 		if err != nil {
-			return nil, fmt.Errorf("cannot decompress zstd data of size %d: %w; src_zstd=%X", len(src), err, src)
+			return nil, fmt.Errorf("cannot decompress zstd data: %w", err)
 		}
 		dst, err = unmarshalInt64NearestDelta(dst, bb.B, firstValue, itemsCount)
 		bbPool.Put(bb)
@@ -178,7 +178,7 @@ func unmarshalInt64Array(dst []int64, src []byte, mt MarshalType, firstValue int
 		bb := bbPool.Get()
 		bb.B, err = DecompressZSTD(bb.B[:0], src)
 		if err != nil {
-			return nil, fmt.Errorf("cannot decompress zstd data of size %d: %w; src_zstd=%X", len(src), err, src)
+			return nil, fmt.Errorf("cannot decompress zstd data: %w", err)
 		}
 		dst, err = unmarshalInt64NearestDelta2(dst, bb.B, firstValue, itemsCount)
 		bbPool.Put(bb)
diff --git a/lib/mergeset/block_stream_reader.go b/lib/mergeset/block_stream_reader.go
index 0949c507f..a89710887 100644
--- a/lib/mergeset/block_stream_reader.go
+++ b/lib/mergeset/block_stream_reader.go
@@ -267,7 +267,7 @@ func (bsr *blockStreamReader) readNextBHS() error {
 	var err error
 	bsr.unpackedBuf, err = encoding.DecompressZSTD(bsr.unpackedBuf[:0], bsr.packedBuf)
 	if err != nil {
-		return fmt.Errorf("cannot decompress index block with size %d: %w", mr.indexBlockSize, err)
+		return fmt.Errorf("cannot decompress index block: %w", err)
 	}
 
 	// Unmarshal the unpacked index block into bsr.bhs.
diff --git a/lib/mergeset/metaindex_row.go b/lib/mergeset/metaindex_row.go
index 27569a321..3d5b07fe9 100644
--- a/lib/mergeset/metaindex_row.go
+++ b/lib/mergeset/metaindex_row.go
@@ -89,7 +89,7 @@ func unmarshalMetaindexRows(dst []metaindexRow, r io.Reader) ([]metaindexRow, er
 	}
 	data, err := encoding.DecompressZSTD(nil, compressedData)
 	if err != nil {
-		return dst, fmt.Errorf("cannot decompress metaindex data with size %d bytes: %w", len(compressedData), err)
+		return dst, fmt.Errorf("cannot decompress metaindex data: %w", err)
 	}
 
 	dstLen := len(dst)
diff --git a/lib/mergeset/part_search.go b/lib/mergeset/part_search.go
index 01d067688..79ed0968f 100644
--- a/lib/mergeset/part_search.go
+++ b/lib/mergeset/part_search.go
@@ -294,7 +294,7 @@ func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) {
 	var err error
 	ps.indexBuf, err = encoding.DecompressZSTD(ps.indexBuf[:0], ps.compressedIndexBuf)
 	if err != nil {
-		return nil, fmt.Errorf("cannot decompress index block with size %d bytes: %w", len(ps.compressedIndexBuf), err)
+		return nil, fmt.Errorf("cannot decompress index block: %w", err)
 	}
 	idxb := getIndexBlock()
 	idxb.bhs, err = unmarshalBlockHeaders(idxb.bhs[:0], ps.indexBuf, int(mr.blockHeadersCount))
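
A minimal standalone sketch of the error-wrapping pattern applied in this diff, using hypothetical names (decompress, readIndexBlock, errCorrupt) in place of the real VictoriaMetrics functions and a simulated failure instead of a real zstd call: the low-level helper attaches the block length, output buffer length and hex dump once, so each caller only wraps with its own context via %w and the details are not duplicated at every call site.

package main

import (
	"errors"
	"fmt"
)

// errCorrupt stands in for whatever error the underlying zstd decompression returns.
var errCorrupt = errors.New("invalid zstd frame")

// decompress mirrors the new DecompressZSTD: the detailed message
// (input length, output buffer length, hex dump) is produced here, once.
func decompress(dst, src []byte) ([]byte, error) {
	// Simulate a decompression failure.
	return dst, fmt.Errorf("cannot decompress zstd block with len=%d to a buffer with len=%d: %w; block data (hex): %X",
		len(src), len(dst), errCorrupt, src)
}

// readIndexBlock mirrors the simplified callers: it adds only
// caller-specific context and relies on %w for the low-level details.
func readIndexBlock(packed []byte) ([]byte, error) {
	b, err := decompress(nil, packed)
	if err != nil {
		return nil, fmt.Errorf("cannot decompress index block: %w", err)
	}
	return b, nil
}

func main() {
	_, err := readIndexBlock([]byte{0xde, 0xad, 0xbe, 0xef})
	fmt.Println(err)
	// The wrapped chain still exposes the root cause.
	fmt.Println(errors.Is(err, errCorrupt)) // true
}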