package storage

import (
	"fmt"
	"path/filepath"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"
)

// table represents a single table with time series data.
type table struct {
	path                string
	smallPartitionsPath string
	bigPartitionsPath   string

	s *Storage

	ptws     []*partitionWrapper
	ptwsLock sync.Mutex

	stop chan struct{}

	retentionWatcherWG  sync.WaitGroup
	finalDedupWatcherWG sync.WaitGroup
}

// partitionWrapper provides refcounting mechanism for the partition.
type partitionWrapper struct {
	// Atomic counters must be at the top of struct for proper 8-byte alignment on 32-bit archs.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/212

	refCount uint64

	// The partition must be dropped if mustDrop > 0
	mustDrop uint64

	pt *partition
}
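
// incRef increments the partition wrapper's reference counter.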
func (ptw *partitionWrapper) incRef() {
	atomic.AddUint64(&ptw.refCount, 1)
}
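
// decRef decrements the reference counter. When it reaches zero, the wrapped
// partition is closed and, if scheduleToDrop was called, dropped from disk.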
func (ptw *partitionWrapper) decRef() {
	n := atomic.AddUint64(&ptw.refCount, ^uint64(0))
	if int64(n) < 0 {
		logger.Panicf("BUG: ptw.refCount must be positive; got %d", int64(n))
	}
	if n > 0 {
		return
	}

	// refCount is zero. Close the partition.
	ptw.pt.MustClose()

	if atomic.LoadUint64(&ptw.mustDrop) == 0 {
		ptw.pt = nil
		return
	}

	// ptw.mustDrop > 0. Drop the partition.
	ptw.pt.Drop()
	ptw.pt = nil
}
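
// scheduleToDrop marks the wrapped partition for dropping once its reference counter reaches zero.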
func (ptw *partitionWrapper) scheduleToDrop() {
	atomic.AddUint64(&ptw.mustDrop, 1)
}

// mustOpenTable opens a table on the given path.
//
// The table is created if it doesn't exist.
func mustOpenTable(path string, s *Storage) *table {
	path = filepath.Clean(path)

	// Create a directory for the table if it doesn't exist yet.
	fs.MustMkdirIfNotExist(path)

	// Create directories for small and big partitions if they don't exist yet.
	smallPartitionsPath := filepath.Join(path, smallDirname)
	fs.MustMkdirIfNotExist(smallPartitionsPath)
	fs.MustRemoveTemporaryDirs(smallPartitionsPath)

	smallSnapshotsPath := filepath.Join(smallPartitionsPath, snapshotsDirname)
	fs.MustMkdirIfNotExist(smallSnapshotsPath)
	fs.MustRemoveTemporaryDirs(smallSnapshotsPath)

	bigPartitionsPath := filepath.Join(path, bigDirname)
	fs.MustMkdirIfNotExist(bigPartitionsPath)
	fs.MustRemoveTemporaryDirs(bigPartitionsPath)

	bigSnapshotsPath := filepath.Join(bigPartitionsPath, snapshotsDirname)
	fs.MustMkdirIfNotExist(bigSnapshotsPath)
	fs.MustRemoveTemporaryDirs(bigSnapshotsPath)

	// Open partitions.
	pts := mustOpenPartitions(smallPartitionsPath, bigPartitionsPath, s)

	tb := &table{
		path:                path,
		smallPartitionsPath: smallPartitionsPath,
		bigPartitionsPath:   bigPartitionsPath,
		s:                   s,

		stop: make(chan struct{}),
	}
	for _, pt := range pts {
		tb.addPartitionNolock(pt)
	}
	tb.startRetentionWatcher()
	tb.startFinalDedupWatcher()
	return tb
}

// CreateSnapshot creates tb snapshot and returns paths to small and big parts of it.
// If the deadline is reached before the snapshot is created, an error is returned.
// If any error occurs during snapshot creation, the data created so far is not removed.
func (tb *table) CreateSnapshot(snapshotName string, deadline uint64) (string, string, error) {
	logger.Infof("creating table snapshot of %q...", tb.path)
	startTime := time.Now()

	ptws := tb.GetPartitions(nil)
	defer tb.PutPartitions(ptws)

	dstSmallDir := filepath.Join(tb.path, smallDirname, snapshotsDirname, snapshotName)
	fs.MustMkdirFailIfExist(dstSmallDir)

	dstBigDir := filepath.Join(tb.path, bigDirname, snapshotsDirname, snapshotName)
	fs.MustMkdirFailIfExist(dstBigDir)

	for _, ptw := range ptws {
		if deadline > 0 && fasttime.UnixTimestamp() > deadline {
			fs.MustRemoveAll(dstSmallDir)
			fs.MustRemoveAll(dstBigDir)
			return "", "", fmt.Errorf("cannot create snapshot for %q: timeout exceeded", tb.path)
		}

		smallPath := filepath.Join(dstSmallDir, ptw.pt.name)
		bigPath := filepath.Join(dstBigDir, ptw.pt.name)
		ptw.pt.MustCreateSnapshotAt(smallPath, bigPath)
	}

	fs.MustSyncPath(dstSmallDir)
	fs.MustSyncPath(dstBigDir)
	fs.MustSyncPath(filepath.Dir(dstSmallDir))
	fs.MustSyncPath(filepath.Dir(dstBigDir))

	logger.Infof("created table snapshot for %q at (%q, %q) in %.3f seconds", tb.path, dstSmallDir, dstBigDir, time.Since(startTime).Seconds())
	return dstSmallDir, dstBigDir, nil
}

// MustDeleteSnapshot deletes snapshot with the given snapshotName.
func (tb *table) MustDeleteSnapshot(snapshotName string) {
	smallDir := filepath.Join(tb.path, smallDirname, snapshotsDirname, snapshotName)
	fs.MustRemoveDirAtomic(smallDir)
	bigDir := filepath.Join(tb.path, bigDirname, snapshotsDirname, snapshotName)
	fs.MustRemoveDirAtomic(bigDir)
}
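
// addPartitionNolock wraps pt into a partitionWrapper with refCount=1 and appends it to tb.ptws.
//
// The caller must hold tb.ptwsLock unless tb isn't visible to other goroutines yet.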
func (tb *table) addPartitionNolock(pt *partition) {
	ptw := &partitionWrapper{
		pt:       pt,
		refCount: 1,
	}
	tb.ptws = append(tb.ptws, ptw)
}

// MustClose closes the table.
// It is expected that all the pending searches on the table are finished before calling MustClose.
func (tb *table) MustClose() {
	close(tb.stop)
	tb.retentionWatcherWG.Wait()
	tb.finalDedupWatcherWG.Wait()

	tb.ptwsLock.Lock()
	ptws := tb.ptws
	tb.ptws = nil
	tb.ptwsLock.Unlock()

	for _, ptw := range ptws {
		if n := atomic.LoadUint64(&ptw.refCount); n != 1 {
			logger.Panicf("BUG: unexpected refCount=%d when closing the partition; probably there are pending searches", n)
		}
		ptw.decRef()
	}
}

// flushPendingRows flushes all the pending raw rows, so they become visible to search.
//
// This function is for debug purposes only.
func (tb *table) flushPendingRows() {
	ptws := tb.GetPartitions(nil)
	defer tb.PutPartitions(ptws)

	var rows []rawRow
	for _, ptw := range ptws {
		rows = ptw.pt.flushPendingRows(rows[:0], true)
	}
}

// TableMetrics contains essential metrics for the table.
type TableMetrics struct {
	partitionMetrics

	PartitionsRefCount uint64
}

// UpdateMetrics updates m with metrics from tb.
func (tb *table) UpdateMetrics(m *TableMetrics) {
	tb.ptwsLock.Lock()
	for _, ptw := range tb.ptws {
		ptw.pt.UpdateMetrics(&m.partitionMetrics)
		m.PartitionsRefCount += atomic.LoadUint64(&ptw.refCount)
	}
	tb.ptwsLock.Unlock()
}

// ForceMergePartitions force-merges partitions in tb with names starting from the given partitionNamePrefix.
//
// Partitions are merged sequentially in order to reduce load on the system.
func (tb *table) ForceMergePartitions(partitionNamePrefix string) error {
	ptws := tb.GetPartitions(nil)
	defer tb.PutPartitions(ptws)
	for _, ptw := range ptws {
		if !strings.HasPrefix(ptw.pt.name, partitionNamePrefix) {
			continue
		}
		logger.Infof("starting forced merge for partition %q", ptw.pt.name)
		startTime := time.Now()
		if err := ptw.pt.ForceMergeAllParts(); err != nil {
			return fmt.Errorf("cannot complete forced merge for partition %q: %w", ptw.pt.name, err)
		}
		logger.Infof("forced merge for partition %q has been finished in %.3f seconds", ptw.pt.name, time.Since(startTime).Seconds())
	}
	return nil
}

// MustAddRows adds the given rows to the table tb.
func (tb *table) MustAddRows(rows []rawRow) {
	if len(rows) == 0 {
		return
	}

	// Verify whether all the rows may be added to a single partition.
	ptwsX := getPartitionWrappers()
	defer putPartitionWrappers(ptwsX)

	ptwsX.a = tb.GetPartitions(ptwsX.a[:0])
	ptws := ptwsX.a
	for i, ptw := range ptws {
		singlePt := true
		for j := range rows {
			if !ptw.pt.HasTimestamp(rows[j].Timestamp) {
				singlePt = false
				break
			}
		}
		if !singlePt {
			continue
		}

		if i != 0 {
			// Move the partition with the matching rows to the front of tb.ptws,
			// so it will be detected faster next time.
			tb.ptwsLock.Lock()
			for j := range tb.ptws {
				if ptw == tb.ptws[j] {
					tb.ptws[0], tb.ptws[j] = tb.ptws[j], tb.ptws[0]
					break
				}
			}
			tb.ptwsLock.Unlock()
		}

		// Fast path - add all the rows into the ptw.
		ptw.pt.AddRows(rows)
		tb.PutPartitions(ptws)
		return
	}

	// Slower path - split rows into per-partition buckets.
	ptBuckets := make(map[*partitionWrapper][]rawRow)
	var missingRows []rawRow
	for i := range rows {
		r := &rows[i]
		ptFound := false
		for _, ptw := range ptws {
			if ptw.pt.HasTimestamp(r.Timestamp) {
				ptBuckets[ptw] = append(ptBuckets[ptw], *r)
				ptFound = true
				break
			}
		}
		if !ptFound {
			missingRows = append(missingRows, *r)
		}
	}

	for ptw, ptRows := range ptBuckets {
		ptw.pt.AddRows(ptRows)
	}
	tb.PutPartitions(ptws)
	if len(missingRows) == 0 {
		return
	}

	// The slowest path - there are rows that don't fit any existing partition.
	// Create new partitions for these rows.
	// Do this under tb.ptwsLock.
	minTimestamp, maxTimestamp := tb.getMinMaxTimestamps()
	tb.ptwsLock.Lock()
	for i := range missingRows {
		r := &missingRows[i]

		if r.Timestamp < minTimestamp || r.Timestamp > maxTimestamp {
			// Silently skip the row outside retention, since it should be deleted anyway.
			continue
		}

		// Make sure the partition for r hasn't been added by another goroutine.
		ptFound := false
		for _, ptw := range tb.ptws {
			if ptw.pt.HasTimestamp(r.Timestamp) {
				ptFound = true
				ptw.pt.AddRows(missingRows[i : i+1])
				break
			}
		}
		if ptFound {
			continue
		}

		pt := mustCreatePartition(r.Timestamp, tb.smallPartitionsPath, tb.bigPartitionsPath, tb.s)
		pt.AddRows(missingRows[i : i+1])
		tb.addPartitionNolock(pt)
	}
	tb.ptwsLock.Unlock()
}
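
// getMinMaxTimestamps returns the allowed [min, max] timestamp range for new rows,
// based on the configured retention and the current time.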
func (tb *table) getMinMaxTimestamps() (int64, int64) {
	now := int64(fasttime.UnixTimestamp() * 1000)
	minTimestamp := now - tb.s.retentionMsecs
	maxTimestamp := now + 2*24*3600*1000 // allow up to +2 days from now due to time zone differences
	if minTimestamp < 0 {
		// Negative timestamps aren't supported by the storage.
		minTimestamp = 0
	}
	if maxTimestamp < 0 {
		maxTimestamp = (1 << 63) - 1
	}
	return minTimestamp, maxTimestamp
}
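
// startRetentionWatcher starts a background goroutine that drops partitions outside the retention.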
func (tb *table) startRetentionWatcher() {
	tb.retentionWatcherWG.Add(1)
	go func() {
		tb.retentionWatcher()
		tb.retentionWatcherWG.Done()
	}()
}
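
// retentionWatcher periodically removes partitions whose max timestamp falls outside the configured retention.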
func (tb *table) retentionWatcher() {
	d := timeutil.AddJitterToDuration(time.Minute)
	ticker := time.NewTicker(d)
	defer ticker.Stop()
	for {
		select {
		case <-tb.stop:
			return
		case <-ticker.C:
		}

		minTimestamp := int64(fasttime.UnixTimestamp()*1000) - tb.s.retentionMsecs
		var ptwsDrop []*partitionWrapper
		tb.ptwsLock.Lock()
		dst := tb.ptws[:0]
		for _, ptw := range tb.ptws {
			if ptw.pt.tr.MaxTimestamp < minTimestamp {
				ptwsDrop = append(ptwsDrop, ptw)
			} else {
				dst = append(dst, ptw)
			}
		}
		tb.ptws = dst
		tb.ptwsLock.Unlock()

		if len(ptwsDrop) == 0 {
			continue
		}

		// There are partitions to drop. Drop them.

		// Remove table references from partitions, so they will be eventually
		// closed and dropped after all the pending searches are done.
		for _, ptw := range ptwsDrop {
			ptw.scheduleToDrop()
			ptw.decRef()
		}
	}
}
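
// startFinalDedupWatcher starts a background goroutine that runs final deduplication for partitions.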
func (tb *table) startFinalDedupWatcher() {
	tb.finalDedupWatcherWG.Add(1)
	go func() {
		tb.finalDedupWatcher()
		tb.finalDedupWatcherWG.Done()
	}()
}
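
// finalDedupWatcher periodically runs final deduplication for partitions other than the current one,
// provided deduplication is enabled.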
func (tb *table) finalDedupWatcher() {
	if !isDedupEnabled() {
		// Deduplication is disabled.
		return
	}
	f := func() {
		ptws := tb.GetPartitions(nil)
		defer tb.PutPartitions(ptws)
		timestamp := timestampFromTime(time.Now())
		currentPartitionName := timestampToPartitionName(timestamp)
		for _, ptw := range ptws {
			if ptw.pt.name == currentPartitionName || !ptw.pt.isFinalDedupNeeded() {
				// Do not run final dedup for the current month or for partitions that don't need it.
				continue
			}
			if err := ptw.pt.runFinalDedup(); err != nil {
				logger.Errorf("cannot run final dedup for partition %s: %s", ptw.pt.name, err)
				continue
			}
		}
	}
	d := timeutil.AddJitterToDuration(time.Hour)
	t := time.NewTicker(d)
	defer t.Stop()
	for {
		select {
		case <-tb.stop:
			return
		case <-t.C:
			f()
		}
	}
}

// GetPartitions appends tb's partitions snapshot to dst and returns the result.
//
// The returned partitions must be passed to PutPartitions
// when they are no longer needed.
func (tb *table) GetPartitions(dst []*partitionWrapper) []*partitionWrapper {
	tb.ptwsLock.Lock()
	for _, ptw := range tb.ptws {
		ptw.incRef()
		dst = append(dst, ptw)
	}
	tb.ptwsLock.Unlock()

	return dst
}

// PutPartitions deregisters ptws obtained via GetPartitions.
func (tb *table) PutPartitions(ptws []*partitionWrapper) {
	for _, ptw := range ptws {
		ptw.decRef()
	}
}
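
// mustOpenPartitions opens all the partitions found under smallPartitionsPath and bigPartitionsPath.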
func mustOpenPartitions(smallPartitionsPath, bigPartitionsPath string, s *Storage) []*partition {
	// Certain partition directories in either `big` or `small` dir may be missing
	// after restoring from backup. So populate partition names from both dirs.
	ptNames := make(map[string]bool)
	mustPopulatePartitionNames(smallPartitionsPath, ptNames)
	mustPopulatePartitionNames(bigPartitionsPath, ptNames)
	var pts []*partition
	for ptName := range ptNames {
		smallPartsPath := filepath.Join(smallPartitionsPath, ptName)
		bigPartsPath := filepath.Join(bigPartitionsPath, ptName)
		pt := mustOpenPartition(smallPartsPath, bigPartsPath, s)
		pts = append(pts, pt)
	}
	return pts
}
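
// mustPopulatePartitionNames adds partition directory names found under partitionsPath to ptNames.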
func mustPopulatePartitionNames(partitionsPath string, ptNames map[string]bool) {
	des := fs.MustReadDir(partitionsPath)
	for _, de := range des {
		if !fs.IsDirOrSymlink(de) {
			// Skip non-directories
			continue
		}
		ptName := de.Name()
		if ptName == snapshotsDirname {
			// Skip directory with snapshots
			continue
		}
		ptNames[ptName] = true
	}
}
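
// partitionWrappers is a reusable holder for a partitionWrapper slice; see ptwsPool.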
type partitionWrappers struct {
	a []*partitionWrapper
}
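
// getPartitionWrappers returns a partitionWrappers holder from ptwsPool.
//
// Typical usage (see MustAddRows):
//
//	ptwsX := getPartitionWrappers()
//	defer putPartitionWrappers(ptwsX)
//	ptwsX.a = tb.GetPartitions(ptwsX.a[:0])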
func getPartitionWrappers() *partitionWrappers {
	v := ptwsPool.Get()
	if v == nil {
		return &partitionWrappers{}
	}
	return v.(*partitionWrappers)
}
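
// putPartitionWrappers resets ptwsX and returns it to ptwsPool for reuse.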
func putPartitionWrappers(ptwsX *partitionWrappers) {
	ptwsX.a = ptwsX.a[:0]
	ptwsPool.Put(ptwsX)
}
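
// ptwsPool reduces allocations of partitionWrappers in hot paths such as MustAddRows.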
var ptwsPool sync.Pool