2019-05-22 21:16:55 +00:00
|
|
|
package storage
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"sync"
|
2020-11-04 23:12:21 +00:00
|
|
|
"sync/atomic"
|
2019-05-22 21:16:55 +00:00
|
|
|
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
|
|
|
)
|
|
|
|
|
|
|
|
// mergeBlockStreams merges bsrs into bsw and updates ph.
|
|
|
|
//
|
|
|
|
// mergeBlockStreams returns immediately if stopCh is closed.
|
|
|
|
//
|
|
|
|
// rowsMerged is atomically updated with the number of merged rows during the merge.
|
2022-10-23 13:08:54 +00:00
|
|
|
func mergeBlockStreams(ph *partHeader, bsw *blockStreamWriter, bsrs []*blockStreamReader, stopCh <-chan struct{}, s *Storage, retentionDeadline int64,
|
|
|
|
rowsMerged, rowsDeleted *uint64) error {
|
2019-05-22 21:16:55 +00:00
|
|
|
ph.Reset()
|
|
|
|
|
|
|
|
bsm := bsmPool.Get().(*blockStreamMerger)
|
2022-10-23 11:30:16 +00:00
|
|
|
bsm.Init(bsrs, retentionDeadline)
|
2022-10-23 13:08:54 +00:00
|
|
|
err := mergeBlockStreamsInternal(ph, bsw, bsm, stopCh, s, rowsMerged, rowsDeleted)
|
2019-05-22 21:16:55 +00:00
|
|
|
bsm.reset()
|
|
|
|
bsmPool.Put(bsm)
|
|
|
|
bsw.MustClose()
|
|
|
|
if err == nil {
|
|
|
|
return nil
|
|
|
|
}
|
2020-06-30 19:58:18 +00:00
|
|
|
return fmt.Errorf("cannot merge %d streams: %s: %w", len(bsrs), bsrs, err)
|
2019-05-22 21:16:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// bsmPool reuses blockStreamMerger objects across merges in order to reduce
// pressure on the garbage collector.
var bsmPool = &sync.Pool{
	New: func() interface{} {
		return &blockStreamMerger{}
	},
}
|
|
|
|
|
|
|
|
// errForciblyStopped is returned by the merge when stopCh is closed.
var errForciblyStopped = fmt.Errorf("forcibly stopped")
|
|
|
|
|
2022-10-23 13:08:54 +00:00
|
|
|
// mergeBlockStreamsInternal pulls blocks from bsm in sorted order and writes
// merged blocks to bsw, updating ph along the way.
//
// Blocks belonging to deleted metrics and blocks fully behind the retention
// deadline are dropped; rowsDeleted is atomically incremented by the number
// of dropped rows. rowsMerged is updated by bsw.WriteExternalBlock.
// Returns errForciblyStopped if stopCh is closed during the merge.
func mergeBlockStreamsInternal(ph *partHeader, bsw *blockStreamWriter, bsm *blockStreamMerger, stopCh <-chan struct{}, s *Storage, rowsMerged, rowsDeleted *uint64) error {
	dmis := s.getDeletedMetricIDs()
	// pendingBlock accumulates rows for the current TSID until it is flushed
	// to bsw; tmpBlock is scratch space for merging overlapping blocks.
	pendingBlockIsEmpty := true
	pendingBlock := getBlock()
	defer putBlock(pendingBlock)
	tmpBlock := getBlock()
	defer putBlock(tmpBlock)
	for bsm.NextBlock() {
		// Non-blocking check for forcible stop on every iteration.
		select {
		case <-stopCh:
			return errForciblyStopped
		default:
		}
		b := bsm.Block
		if dmis.Has(b.bh.TSID.MetricID) {
			// Skip blocks for deleted metrics.
			atomic.AddUint64(rowsDeleted, uint64(b.bh.RowsCount))
			continue
		}
		retentionDeadline := bsm.getRetentionDeadline(b)
		if b.bh.MaxTimestamp < retentionDeadline {
			// Skip blocks out of the given retention.
			atomic.AddUint64(rowsDeleted, uint64(b.bh.RowsCount))
			continue
		}
		if pendingBlockIsEmpty {
			// Load the next block if pendingBlock is empty.
			pendingBlock.CopyFrom(b)
			pendingBlockIsEmpty = false
			continue
		}

		// Verify whether pendingBlock may be merged with b (the current block).
		if pendingBlock.bh.TSID.MetricID != b.bh.TSID.MetricID {
			// Fast path - blocks belong to distinct time series.
			// Write the pendingBlock and then deal with b.
			if b.bh.TSID.Less(&pendingBlock.bh.TSID) {
				// Blocks must arrive in ascending TSID order; a violation
				// indicates a bug in the block stream merger.
				logger.Panicf("BUG: the next TSID=%+v is smaller than the current TSID=%+v", &b.bh.TSID, &pendingBlock.bh.TSID)
			}
			bsw.WriteExternalBlock(pendingBlock, ph, rowsMerged)
			pendingBlock.CopyFrom(b)
			continue
		}
		if pendingBlock.tooBig() && pendingBlock.bh.MaxTimestamp <= b.bh.MinTimestamp {
			// Fast path - pendingBlock is too big and it doesn't overlap with b.
			// Write the pendingBlock and then deal with b.
			bsw.WriteExternalBlock(pendingBlock, ph, rowsMerged)
			pendingBlock.CopyFrom(b)
			continue
		}

		// Slow path - pendingBlock and b belong to the same time series,
		// so they must be merged.
		if err := unmarshalAndCalibrateScale(pendingBlock, b); err != nil {
			return fmt.Errorf("cannot unmarshal and calibrate scale for blocks to be merged: %w", err)
		}
		tmpBlock.Reset()
		tmpBlock.bh.TSID = b.bh.TSID
		tmpBlock.bh.Scale = b.bh.Scale
		// The merged block cannot be more precise than the least precise input.
		tmpBlock.bh.PrecisionBits = minUint8(pendingBlock.bh.PrecisionBits, b.bh.PrecisionBits)
		mergeBlocks(tmpBlock, pendingBlock, b, retentionDeadline, rowsDeleted)
		if len(tmpBlock.timestamps) <= maxRowsPerBlock {
			// More entries may be added to tmpBlock. Swap it with pendingBlock,
			// so more entries may be added to pendingBlock on the next iteration.
			if len(tmpBlock.timestamps) > 0 {
				tmpBlock.fixupTimestamps()
			} else {
				// The merge dropped every row (e.g. all outside retention).
				pendingBlockIsEmpty = true
			}
			pendingBlock, tmpBlock = tmpBlock, pendingBlock
			continue
		}

		// Write the first maxRowsPerBlock of tmpBlock.timestamps to bsw,
		// leave the rest in pendingBlock.
		tmpBlock.nextIdx = maxRowsPerBlock
		pendingBlock.CopyFrom(tmpBlock)
		pendingBlock.fixupTimestamps()
		tmpBlock.nextIdx = 0
		tmpBlock.timestamps = tmpBlock.timestamps[:maxRowsPerBlock]
		tmpBlock.values = tmpBlock.values[:maxRowsPerBlock]
		tmpBlock.fixupTimestamps()
		bsw.WriteExternalBlock(tmpBlock, ph, rowsMerged)
	}
	if err := bsm.Error(); err != nil {
		return fmt.Errorf("cannot read block to be merged: %w", err)
	}
	// Flush the last pending block, if any.
	if !pendingBlockIsEmpty {
		bsw.WriteExternalBlock(pendingBlock, ph, rowsMerged)
	}
	return nil
}
|
|
|
|
|
|
|
|
// mergeBlocks merges ib1 and ib2 to ob.
//
// Both input blocks must be unmarshaled and mergeable (see assertMergeable).
// Samples with timestamps older than retentionDeadline are skipped and the
// number of skipped samples is atomically added to rowsDeleted.
func mergeBlocks(ob, ib1, ib2 *Block, retentionDeadline int64, rowsDeleted *uint64) {
	ib1.assertMergeable(ib2)
	ib1.assertUnmarshaled()
	ib2.assertUnmarshaled()

	skipSamplesOutsideRetention(ib1, retentionDeadline, rowsDeleted)
	skipSamplesOutsideRetention(ib2, retentionDeadline, rowsDeleted)

	if ib1.bh.MaxTimestamp < ib2.bh.MinTimestamp {
		// Fast path - ib1 values have smaller timestamps than ib2 values.
		appendRows(ob, ib1)
		appendRows(ob, ib2)
		return
	}
	if ib2.bh.MaxTimestamp < ib1.bh.MinTimestamp {
		// Fast path - ib2 values have smaller timestamps than ib1 values.
		appendRows(ob, ib2)
		appendRows(ob, ib1)
		return
	}
	if ib1.nextIdx >= len(ib1.timestamps) {
		// ib1 has no remaining rows (e.g. all were skipped above).
		appendRows(ob, ib2)
		return
	}
	if ib2.nextIdx >= len(ib2.timestamps) {
		// ib2 has no remaining rows (e.g. all were skipped above).
		appendRows(ob, ib1)
		return
	}
	// Slow path - the blocks overlap in time. Copy from ib1 the run of rows
	// whose timestamps don't exceed the next ib2 timestamp, then swap the
	// roles of ib1 and ib2 and repeat until one block is drained.
	for {
		i := ib1.nextIdx
		ts2 := ib2.timestamps[ib2.nextIdx]
		for i < len(ib1.timestamps) && ib1.timestamps[i] <= ts2 {
			i++
		}
		ob.timestamps = append(ob.timestamps, ib1.timestamps[ib1.nextIdx:i]...)
		ob.values = append(ob.values, ib1.values[ib1.nextIdx:i]...)
		ib1.nextIdx = i
		if ib1.nextIdx >= len(ib1.timestamps) {
			// ib1 is drained; the remainder of ib2 follows in order.
			appendRows(ob, ib2)
			return
		}
		ib1, ib2 = ib2, ib1
	}
}
|
|
|
|
|
2020-10-31 18:42:13 +00:00
|
|
|
func skipSamplesOutsideRetention(b *Block, retentionDeadline int64, rowsDeleted *uint64) {
|
|
|
|
timestamps := b.timestamps
|
|
|
|
nextIdx := b.nextIdx
|
2021-03-26 15:57:51 +00:00
|
|
|
nextIdxOrig := nextIdx
|
2020-10-31 18:42:13 +00:00
|
|
|
for nextIdx < len(timestamps) && timestamps[nextIdx] < retentionDeadline {
|
|
|
|
nextIdx++
|
|
|
|
}
|
2021-03-26 15:57:51 +00:00
|
|
|
if n := nextIdx - nextIdxOrig; n > 0 {
|
|
|
|
atomic.AddUint64(rowsDeleted, uint64(n))
|
|
|
|
b.nextIdx = nextIdx
|
|
|
|
}
|
2020-10-31 18:42:13 +00:00
|
|
|
}
|
|
|
|
|
2019-05-22 21:16:55 +00:00
|
|
|
func appendRows(ob, ib *Block) {
|
|
|
|
ob.timestamps = append(ob.timestamps, ib.timestamps[ib.nextIdx:]...)
|
|
|
|
ob.values = append(ob.values, ib.values[ib.nextIdx:]...)
|
|
|
|
}
|
|
|
|
|
|
|
|
func unmarshalAndCalibrateScale(b1, b2 *Block) error {
|
|
|
|
if err := b1.UnmarshalData(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := b2.UnmarshalData(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-10-31 18:42:13 +00:00
|
|
|
scale := decimal.CalibrateScale(b1.values[b1.nextIdx:], b1.bh.Scale, b2.values[b2.nextIdx:], b2.bh.Scale)
|
2019-05-22 21:16:55 +00:00
|
|
|
b1.bh.Scale = scale
|
|
|
|
b2.bh.Scale = scale
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// minUint8 returns the smaller of a and b.
func minUint8(a, b uint8) uint8 {
	if a > b {
		return b
	}
	return a
}
|