2019-05-22 21:16:55 +00:00
|
|
|
package storage
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"path/filepath"
|
|
|
|
"sync"
|
|
|
|
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/filestream"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
|
|
|
)
|
|
|
|
|
|
|
|
// blockStreamReader represents block stream reader.
type blockStreamReader struct {
	// Currently active block.
	Block Block

	// Contains TSID for the previous block.
	// This field is needed for checking that TSIDs
	// increase over time when reading blocks.
	tsidPrev TSID

	// Filesystem path to the stream reader.
	//
	// Is empty for inmemory stream readers.
	path string

	// Header of the part being read; used for validating
	// per-block counters and timestamp ranges.
	ph partHeader

	// Stream readers for the per-part timestamps, values and index data.
	timestampsReader filestream.ReadCloser
	valuesReader     filestream.ReadCloser
	indexReader      filestream.ReadCloser

	// Metaindex rows that haven't been processed yet; consumed one by one
	// in readIndexBlock.
	mrs []metaindexRow

	// Points the current mr from mrs.
	mr *metaindexRow

	// The total number of rows read so far.
	rowsCount uint64

	// The total number of blocks read so far.
	blocksCount uint64

	// The number of block headers in the current index block.
	indexBlockHeadersCount uint32

	// Expected offsets of the next block in the corresponding data streams;
	// compared against the offsets stored in each block header.
	timestampsBlockOffset uint64
	valuesBlockOffset     uint64
	indexBlockOffset      uint64

	// Offset and contents of the most recently read timestamps block.
	// Reused in readBlock when consecutive blocks reference the same
	// timestamps block, so it isn't read from the stream twice.
	prevTimestampsBlockOffset uint64
	prevTimestampsData        []byte

	// Decompressed contents of the current index block and the raw
	// compressed bytes it was read from (both reused across blocks).
	indexData           []byte
	compressedIndexData []byte

	// Cursor to indexData.
	indexCursor []byte

	// The first error encountered while reading; set to io.EOF
	// on normal end of the stream.
	err error
}
|
|
|
|
|
|
|
|
func (bsr *blockStreamReader) reset() {
|
|
|
|
bsr.Block.Reset()
|
|
|
|
|
|
|
|
bsr.path = ""
|
|
|
|
|
|
|
|
bsr.ph.Reset()
|
|
|
|
|
|
|
|
bsr.timestampsReader = nil
|
|
|
|
bsr.valuesReader = nil
|
|
|
|
bsr.indexReader = nil
|
|
|
|
|
|
|
|
bsr.mrs = bsr.mrs[:0]
|
|
|
|
bsr.mr = nil
|
|
|
|
|
|
|
|
bsr.rowsCount = 0
|
|
|
|
bsr.blocksCount = 0
|
|
|
|
bsr.indexBlockHeadersCount = 0
|
|
|
|
|
|
|
|
bsr.timestampsBlockOffset = 0
|
|
|
|
bsr.valuesBlockOffset = 0
|
|
|
|
bsr.indexBlockOffset = 0
|
|
|
|
|
2020-09-09 20:18:32 +00:00
|
|
|
bsr.prevTimestampsBlockOffset = 0
|
|
|
|
bsr.prevTimestampsData = bsr.prevTimestampsData[:0]
|
|
|
|
|
2019-05-22 21:16:55 +00:00
|
|
|
bsr.indexData = bsr.indexData[:0]
|
|
|
|
bsr.compressedIndexData = bsr.compressedIndexData[:0]
|
|
|
|
|
|
|
|
bsr.indexCursor = nil
|
|
|
|
|
|
|
|
bsr.err = nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// String returns human-readable representation of bsr.
|
|
|
|
func (bsr *blockStreamReader) String() string {
|
|
|
|
if len(bsr.path) > 0 {
|
|
|
|
return bsr.path
|
|
|
|
}
|
|
|
|
return bsr.ph.String()
|
|
|
|
}
|
|
|
|
|
2023-04-14 22:46:09 +00:00
|
|
|
// MustInitFromInmemoryPart initializes bsr from the given mp.
|
|
|
|
func (bsr *blockStreamReader) MustInitFromInmemoryPart(mp *inmemoryPart) {
|
2019-05-22 21:16:55 +00:00
|
|
|
bsr.reset()
|
|
|
|
|
|
|
|
bsr.ph = mp.ph
|
|
|
|
bsr.timestampsReader = mp.timestampsData.NewReader()
|
|
|
|
bsr.valuesReader = mp.valuesData.NewReader()
|
|
|
|
bsr.indexReader = mp.indexData.NewReader()
|
|
|
|
|
|
|
|
var err error
|
|
|
|
bsr.mrs, err = unmarshalMetaindexRows(bsr.mrs[:0], mp.metaindexData.NewReader())
|
|
|
|
if err != nil {
|
|
|
|
logger.Panicf("BUG: cannot unmarshal metaindex rows from inmemoryPart: %s", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-04-14 22:46:09 +00:00
|
|
|
// MustInitFromFilePart initializes bsr from a file-based part on the given path.
|
2019-05-22 21:16:55 +00:00
|
|
|
//
|
|
|
|
// Files in the part are always read without OS cache pollution,
|
|
|
|
// since they are usually deleted after the merge.
|
2023-04-14 22:46:09 +00:00
|
|
|
func (bsr *blockStreamReader) MustInitFromFilePart(path string) {
|
2019-05-22 21:16:55 +00:00
|
|
|
bsr.reset()
|
|
|
|
|
|
|
|
path = filepath.Clean(path)
|
|
|
|
|
2023-04-14 22:46:09 +00:00
|
|
|
bsr.ph.MustReadMetadata(path)
|
2019-05-22 21:16:55 +00:00
|
|
|
|
2023-03-25 21:33:54 +00:00
|
|
|
timestampsPath := filepath.Join(path, timestampsFilename)
|
2023-04-14 22:03:39 +00:00
|
|
|
timestampsFile := filestream.MustOpen(timestampsPath, true)
|
2019-05-22 21:16:55 +00:00
|
|
|
|
2023-03-25 21:33:54 +00:00
|
|
|
valuesPath := filepath.Join(path, valuesFilename)
|
2023-04-14 22:03:39 +00:00
|
|
|
valuesFile := filestream.MustOpen(valuesPath, true)
|
2019-05-22 21:16:55 +00:00
|
|
|
|
2023-03-25 21:33:54 +00:00
|
|
|
indexPath := filepath.Join(path, indexFilename)
|
2023-04-14 22:03:39 +00:00
|
|
|
indexFile := filestream.MustOpen(indexPath, true)
|
2019-05-22 21:16:55 +00:00
|
|
|
|
2023-03-25 21:33:54 +00:00
|
|
|
metaindexPath := filepath.Join(path, metaindexFilename)
|
2023-04-14 22:03:39 +00:00
|
|
|
metaindexFile := filestream.MustOpen(metaindexPath, true)
|
2019-05-22 21:16:55 +00:00
|
|
|
mrs, err := unmarshalMetaindexRows(bsr.mrs[:0], metaindexFile)
|
|
|
|
metaindexFile.MustClose()
|
|
|
|
if err != nil {
|
2023-04-14 22:03:39 +00:00
|
|
|
logger.Panicf("FATAL: cannot unmarshal metaindex rows from file part %q: %s", metaindexPath, err)
|
2019-05-22 21:16:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
bsr.path = path
|
|
|
|
bsr.timestampsReader = timestampsFile
|
|
|
|
bsr.valuesReader = valuesFile
|
|
|
|
bsr.indexReader = indexFile
|
|
|
|
bsr.mrs = mrs
|
|
|
|
}
|
|
|
|
|
|
|
|
// MustClose closes the bsr.
|
|
|
|
//
|
|
|
|
// It closes *Reader files passed to Init.
|
|
|
|
func (bsr *blockStreamReader) MustClose() {
|
2023-04-14 21:39:26 +00:00
|
|
|
bsr.timestampsReader.MustClose()
|
|
|
|
bsr.valuesReader.MustClose()
|
2019-05-22 21:16:55 +00:00
|
|
|
bsr.indexReader.MustClose()
|
|
|
|
|
|
|
|
bsr.reset()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Error returns the last error.
|
|
|
|
func (bsr *blockStreamReader) Error() error {
|
|
|
|
if bsr.err == nil || bsr.err == io.EOF {
|
|
|
|
return nil
|
|
|
|
}
|
2020-06-30 19:58:18 +00:00
|
|
|
return fmt.Errorf("error when reading part %q: %w", bsr, bsr.err)
|
2019-05-22 21:16:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// NextBlock advances bsr to the next block.
|
|
|
|
func (bsr *blockStreamReader) NextBlock() bool {
|
|
|
|
if bsr.err != nil {
|
|
|
|
return false
|
|
|
|
}
|
2022-01-31 20:45:56 +00:00
|
|
|
bsr.tsidPrev = bsr.Block.bh.TSID
|
2019-05-22 21:16:55 +00:00
|
|
|
bsr.Block.Reset()
|
|
|
|
err := bsr.readBlock()
|
|
|
|
if err == nil {
|
2022-01-31 20:45:56 +00:00
|
|
|
if bsr.Block.bh.TSID.Less(&bsr.tsidPrev) {
|
|
|
|
bsr.err = fmt.Errorf("possible data corruption: the next TSID=%v is smaller than the previous TSID=%v", &bsr.Block.bh.TSID, &bsr.tsidPrev)
|
2022-01-20 18:36:33 +00:00
|
|
|
return false
|
2019-05-22 21:16:55 +00:00
|
|
|
}
|
2022-01-20 18:36:33 +00:00
|
|
|
if bsr.Block.bh.RowsCount == 0 {
|
|
|
|
bsr.err = fmt.Errorf("invalid block read with zero rows; block=%+v", &bsr.Block)
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return true
|
2019-05-22 21:16:55 +00:00
|
|
|
}
|
|
|
|
if err == io.EOF {
|
|
|
|
bsr.err = io.EOF
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2020-06-30 19:58:18 +00:00
|
|
|
bsr.err = fmt.Errorf("cannot read next block: %w", err)
|
2019-05-22 21:16:55 +00:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// readBlock reads the next block header from the current index block,
// validates it against the part header and the accumulated stream offsets,
// and then reads the block's timestamps and values data.
//
// It returns io.EOF when there are no more blocks in the stream.
func (bsr *blockStreamReader) readBlock() error {
	if len(bsr.indexCursor) == 0 {
		// The current index block is exhausted. Verify it contained exactly
		// the number of block headers promised by its metaindex row before
		// moving on to the next index block.
		if bsr.mr != nil && bsr.indexBlockHeadersCount != bsr.mr.BlockHeadersCount {
			return fmt.Errorf("invalid number of block headers in the previous index block at offset %d; got %d; want %d",
				bsr.prevIndexBlockOffset(), bsr.indexBlockHeadersCount, bsr.mr.BlockHeadersCount)
		}
		bsr.indexBlockHeadersCount = 0
		if err := bsr.readIndexBlock(); err != nil {
			// io.EOF is propagated as-is — it signals normal end of stream.
			if err == io.EOF {
				return io.EOF
			}
			return fmt.Errorf("cannot read index block: %w", err)
		}
	}

	// Read block header.
	if len(bsr.indexCursor) < marshaledBlockHeaderSize {
		return fmt.Errorf("too short index data for reading block header at offset %d; got %d bytes; want %d bytes",
			bsr.prevIndexBlockOffset(), len(bsr.indexCursor), marshaledBlockHeaderSize)
	}
	// Copy the raw header bytes into the block and advance the cursor.
	bsr.Block.headerData = append(bsr.Block.headerData[:0], bsr.indexCursor[:marshaledBlockHeaderSize]...)
	bsr.indexCursor = bsr.indexCursor[marshaledBlockHeaderSize:]
	tail, err := bsr.Block.bh.Unmarshal(bsr.Block.headerData)
	if err != nil {
		return fmt.Errorf("cannot parse block header read from index data at offset %d: %w", bsr.prevIndexBlockOffset(), err)
	}
	if len(tail) > 0 {
		return fmt.Errorf("non-empty tail left after parsing block header at offset %d: %x", bsr.prevIndexBlockOffset(), tail)
	}

	// The stream must not contain more blocks than declared in the part header.
	bsr.blocksCount++
	if bsr.blocksCount > bsr.ph.BlocksCount {
		return fmt.Errorf("too many blocks found in the block stream; got %d; cannot be bigger than %d", bsr.blocksCount, bsr.ph.BlocksCount)
	}

	// Validate block header.
	bsr.rowsCount += uint64(bsr.Block.bh.RowsCount)
	if bsr.rowsCount > bsr.ph.RowsCount {
		return fmt.Errorf("too many rows found in the block stream; got %d; cannot be bigger than %d", bsr.rowsCount, bsr.ph.RowsCount)
	}
	// Block timestamps must stay within the part's [MinTimestamp, MaxTimestamp] range.
	if bsr.Block.bh.MinTimestamp < bsr.ph.MinTimestamp {
		return fmt.Errorf("invalid MinTimestamp at block header at offset %d; got %d; cannot be smaller than %d",
			bsr.prevIndexBlockOffset(), bsr.Block.bh.MinTimestamp, bsr.ph.MinTimestamp)
	}
	if bsr.Block.bh.MaxTimestamp > bsr.ph.MaxTimestamp {
		return fmt.Errorf("invalid MaxTimestamp at block header at offset %d; got %d; cannot be bigger than %d",
			bsr.prevIndexBlockOffset(), bsr.Block.bh.MaxTimestamp, bsr.ph.MaxTimestamp)
	}
	// Consecutive blocks may reference the same timestamps block; in that case
	// the previously read timestamps data is reused instead of re-reading it.
	usePrevTimestamps := len(bsr.prevTimestampsData) > 0 && bsr.Block.bh.TimestampsBlockOffset == bsr.prevTimestampsBlockOffset
	if usePrevTimestamps {
		if int(bsr.Block.bh.TimestampsBlockSize) != len(bsr.prevTimestampsData) {
			return fmt.Errorf("invalid TimestampsBlockSize at block header at offset %d; got %d; want %d",
				bsr.prevIndexBlockOffset(), bsr.Block.bh.TimestampsBlockSize, len(bsr.prevTimestampsData))
		}
	} else if bsr.Block.bh.TimestampsBlockOffset != bsr.timestampsBlockOffset {
		return fmt.Errorf("invalid TimestampsBlockOffset at block header at offset %d; got %d; want %d",
			bsr.prevIndexBlockOffset(), bsr.Block.bh.TimestampsBlockOffset, bsr.timestampsBlockOffset)
	}
	if bsr.Block.bh.ValuesBlockOffset != bsr.valuesBlockOffset {
		return fmt.Errorf("invalid ValuesBlockOffset at block header at offset %d; got %d; want %d",
			bsr.prevIndexBlockOffset(), bsr.Block.bh.ValuesBlockOffset, bsr.valuesBlockOffset)
	}

	// Read timestamps data.
	if usePrevTimestamps {
		bsr.Block.timestampsData = append(bsr.Block.timestampsData[:0], bsr.prevTimestampsData...)
	} else {
		bsr.Block.timestampsData = bytesutil.ResizeNoCopyMayOverallocate(bsr.Block.timestampsData, int(bsr.Block.bh.TimestampsBlockSize))
		fs.MustReadData(bsr.timestampsReader, bsr.Block.timestampsData)
		// Remember the freshly read timestamps block so the next block
		// can reuse it if it shares the same offset.
		bsr.prevTimestampsBlockOffset = bsr.timestampsBlockOffset
		bsr.prevTimestampsData = append(bsr.prevTimestampsData[:0], bsr.Block.timestampsData...)
	}

	// Read values data.
	bsr.Block.valuesData = bytesutil.ResizeNoCopyMayOverallocate(bsr.Block.valuesData, int(bsr.Block.bh.ValuesBlockSize))
	fs.MustReadData(bsr.valuesReader, bsr.Block.valuesData)

	// Update offsets.
	// The timestamps offset is not advanced when the timestamps block was
	// shared with the previous block, since nothing was read from the stream.
	if !usePrevTimestamps {
		bsr.timestampsBlockOffset += uint64(bsr.Block.bh.TimestampsBlockSize)
	}
	bsr.valuesBlockOffset += uint64(bsr.Block.bh.ValuesBlockSize)
	bsr.indexBlockHeadersCount++

	return nil
}
|
|
|
|
|
|
|
|
func (bsr *blockStreamReader) readIndexBlock() error {
|
|
|
|
// Go to the next metaindex row.
|
|
|
|
if len(bsr.mrs) == 0 {
|
|
|
|
return io.EOF
|
|
|
|
}
|
|
|
|
bsr.mr = &bsr.mrs[0]
|
|
|
|
bsr.mrs = bsr.mrs[1:]
|
|
|
|
|
|
|
|
// Validate metaindex row.
|
|
|
|
if bsr.indexBlockOffset != bsr.mr.IndexBlockOffset {
|
|
|
|
return fmt.Errorf("invalid IndexBlockOffset in metaindex row; got %d; want %d", bsr.mr.IndexBlockOffset, bsr.indexBlockOffset)
|
|
|
|
}
|
|
|
|
if bsr.mr.MinTimestamp < bsr.ph.MinTimestamp {
|
|
|
|
return fmt.Errorf("invalid MinTimesamp in metaindex row; got %d; cannot be smaller than %d", bsr.mr.MinTimestamp, bsr.ph.MinTimestamp)
|
|
|
|
}
|
|
|
|
if bsr.mr.MaxTimestamp > bsr.ph.MaxTimestamp {
|
|
|
|
return fmt.Errorf("invalid MaxTimestamp in metaindex row; got %d; cannot be bigger than %d", bsr.mr.MaxTimestamp, bsr.ph.MaxTimestamp)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read index block.
|
2022-01-31 22:18:39 +00:00
|
|
|
bsr.compressedIndexData = bytesutil.ResizeNoCopyMayOverallocate(bsr.compressedIndexData, int(bsr.mr.IndexBlockSize))
|
2023-04-14 21:39:26 +00:00
|
|
|
fs.MustReadData(bsr.indexReader, bsr.compressedIndexData)
|
2019-05-22 21:16:55 +00:00
|
|
|
tmpData, err := encoding.DecompressZSTD(bsr.indexData[:0], bsr.compressedIndexData)
|
|
|
|
if err != nil {
|
2022-12-04 06:13:13 +00:00
|
|
|
return fmt.Errorf("cannot decompress index block at offset %d: %w", bsr.indexBlockOffset, err)
|
2019-05-22 21:16:55 +00:00
|
|
|
}
|
|
|
|
bsr.indexData = tmpData
|
|
|
|
bsr.indexCursor = bsr.indexData
|
|
|
|
|
|
|
|
// Update offsets.
|
|
|
|
bsr.indexBlockOffset += uint64(bsr.mr.IndexBlockSize)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (bsr *blockStreamReader) prevIndexBlockOffset() uint64 {
|
|
|
|
return bsr.indexBlockOffset - uint64(bsr.mr.IndexBlockSize)
|
|
|
|
}
|
|
|
|
|
|
|
|
func getBlockStreamReader() *blockStreamReader {
|
|
|
|
v := bsrPool.Get()
|
|
|
|
if v == nil {
|
|
|
|
return &blockStreamReader{}
|
|
|
|
}
|
|
|
|
return v.(*blockStreamReader)
|
|
|
|
}
|
|
|
|
|
|
|
|
// putBlockStreamReader closes bsr and returns it to the pool.
//
// bsr must not be used after this call.
func putBlockStreamReader(bsr *blockStreamReader) {
	bsr.MustClose()
	bsrPool.Put(bsr)
}
|
|
|
|
|
|
|
|
// bsrPool reduces allocations by reusing blockStreamReader objects
// via getBlockStreamReader/putBlockStreamReader.
var bsrPool sync.Pool
|