Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git, synced 2024-11-21 14:44:00 +00:00
lib/storage: pre-create timeseries before indexDB rotation (#4652)
* lib/storage: pre-create timeseries before indexDB rotation

  During the hour before an indexDB rotation, start creating index records in the next indexDB.
  This should improve performance during the switch to the next indexDB and remove ingestion issues,
  since there is no need to create new index records for timeseries already ingested into the current indexDB.

  https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4563

* lib/storage: further work on indexdb rotation optimization

  - Document the change at docs/CHANGELOG.md
  - Move back various caches from indexDB to Storage. This makes the change less intrusive.
    The dateMetricIDCache now takes into account indexDB generation, so it stores (date, metricID)
    entries for both the current and the next indexDB.
  - Consolidate the code responsible for idbNext pre-filling into the prefillNextIndexDB() function.
    This improves code readability and maintainability a bit.
  - Rewrite and simplify the code responsible for calculating the next retention timestamp.
    Add various tests for corner cases of this code.
  - Remove indexdb pre-filling from the RegisterMetricNames() function, since this function is rarely called.
    It is OK to add indexdb entries on demand in this function. This simplifies the code.

  Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1401

* docs/CHANGELOG.md: refer to https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4563

---------

Co-authored-by: Aliaksandr Valialkin <valyala@victoriametrics.com>
Parent: 7750c5a553
Commit: 30b32583f4
6 changed files with 368 additions and 193 deletions
@@ -584,6 +584,9 @@ func registerStorageMetrics(strg *storage.Storage) {
     metrics.NewGauge(`vm_timeseries_repopulated_total`, func() float64 {
         return float64(m().TimeseriesRepopulated)
     })
+    metrics.NewGauge(`vm_timeseries_precreated_total`, func() float64 {
+        return float64(m().TimeseriesPreCreated)
+    })
     metrics.NewGauge(`vm_new_timeseries_created_total`, func() float64 {
         return float64(m().NewTimeseriesCreated)
     })
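The gauge registered above is read lazily at scrape time; with the VictoriaMetrics metrics package the callback result ends up on the /metrics page. A small self-contained example of that pattern (the counter value is faked, not wired to a real Storage):

package main

import (
    "bytes"
    "fmt"

    "github.com/VictoriaMetrics/metrics"
)

func main() {
    var precreated uint64 = 12345 // stand-in for m().TimeseriesPreCreated
    metrics.NewGauge(`vm_timeseries_precreated_total`, func() float64 {
        return float64(precreated)
    })

    var buf bytes.Buffer
    metrics.WritePrometheus(&buf, false)
    fmt.Print(buf.String()) // output includes: vm_timeseries_precreated_total 12345
}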
@@ -27,7 +27,8 @@ The following `tip` changes can be tested by building VictoriaMetrics components
 * SECURITY: upgrade base docker image (alpine) from 3.18.0 to 3.18.2. See [alpine 3.18.2 release notes](https://alpinelinux.org/posts/Alpine-3.15.9-3.16.6-3.17.4-3.18.2-released.html).
 * SECURITY: upgrade Go builder from Go1.20.5 to Go1.20.6. See [the list of issues addressed in Go1.20.6](https://github.com/golang/go/issues?q=milestone%3AGo1.20.6+label%3ACherryPickApproved).
 
-* FEATURE: reduce memory usage by up to 5x for setups with [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) and long [retention](https://docs.victoriametrics.com/#retention). See [description for this change](https://github.com/VictoriaMetrics/VictoriaMetrics/commit/7094fa38bc207c7bd7330ea8a834310a310ce5e3) for details.
+* FEATURE: reduce memory usage by up to 5x for setups with [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) and long [retention](https://docs.victoriametrics.com/#retention). See [the description for this change](https://github.com/VictoriaMetrics/VictoriaMetrics/commit/7094fa38bc207c7bd7330ea8a834310a310ce5e3) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4563) for details.
+* FEATURE: reduce spikes in CPU and disk IO usage during `indexdb` rotation (aka inverted index), which is performed once per [`-retentionPeriod`](https://docs.victoriametrics.com/#retention). The new algorithm gradually pre-populates newly created `indexdb` during the last hour before the rotation. The number of pre-populated series in the newly created `indexdb` can be [monitored](https://docs.victoriametrics.com/#monitoring) via `vm_timeseries_precreated_total` metric. This should resolve [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1401).
 * FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): allow selecting time series matching at least one of multiple `or` filters. For example, `{env="prod",job="a" or env="dev",job="b"}` selects series with either `{env="prod",job="a"}` or `{env="dev",job="b"}` labels. This functionality allows passing the selected series to [rollup functions](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions) without the need to use [subqueries](https://docs.victoriametrics.com/MetricsQL.html#subqueries). See [these docs](https://docs.victoriametrics.com/keyConcepts.html#filtering-by-multiple-or-filters).
 * FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): add ability to preserve metric names for binary operation results via `keep_metric_names` modifier. For example, `({__name__=~"foo|bar"} / 10) keep_metric_names` leaves `foo` and `bar` metric names in division results. See [these docs](https://docs.victoriametrics.com/MetricsQL.html#keep_metric_names). This helps to address issues like [this one](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3710).
 * FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): add ability to copy all the labels from `one` side of [many-to-one operations](https://prometheus.io/docs/prometheus/latest/querying/operators/#many-to-one-and-one-to-many-vector-matches) by specifying `*` inside `group_left()` or `group_right()`. Also allow adding a prefix for copied label names via `group_left(*) prefix "..."` syntax. For example, the following query copies Kubernetes namespace labels to `kube_pod_info` series and adds `ns_` prefix for the copied label names: `kube_pod_info * on(namespace) group_left(*) prefix "ns_" kube_namespace_labels`. The labels from `on()` list aren't prefixed. This feature resolves [this](https://stackoverflow.com/questions/76661818/how-to-add-namespace-labels-to-pod-labels-in-prometheus) and [that](https://stackoverflow.com/questions/76653997/how-can-i-make-a-new-copy-of-kube-namespace-labels-metric-with-a-different-name) questions at StackOverflow.
@@ -96,9 +96,6 @@ type indexDB struct {
     // and is used for syncing items from different indexDBs
     generation uint64
 
-    // The unix timestamp in seconds for the indexDB rotation.
-    rotationTimestamp uint64
-
     name string
     tb   *mergeset.Table
 
@@ -136,10 +133,7 @@ func getTagFiltersCacheSize() int {
 //
 // The last segment of the path should contain unique hex value which
 // will be then used as indexDB.generation
-//
-// The rotationTimestamp must be set to the current unix timestamp when mustOpenIndexDB
-// is called when creating new indexdb during indexdb rotation.
-func mustOpenIndexDB(path string, s *Storage, rotationTimestamp uint64, isReadOnly *uint32) *indexDB {
+func mustOpenIndexDB(path string, s *Storage, isReadOnly *uint32) *indexDB {
     if s == nil {
         logger.Panicf("BUG: Storage must be nin-nil")
     }
@@ -157,11 +151,10 @@ func mustOpenIndexDB(path string, s *Storage, rotationTimestamp uint64, isReadOn
     tagFiltersCacheSize := getTagFiltersCacheSize()
 
     db := &indexDB{
         refCount:   1,
         generation: gen,
-        rotationTimestamp: rotationTimestamp,
-        tb:                tb,
-        name:              name,
+        tb:         tb,
+        name:       name,
 
         tagFiltersToMetricIDsCache: workingsetcache.New(tagFiltersCacheSize),
         s:                          s,
@@ -511,7 +511,7 @@ func TestIndexDBOpenClose(t *testing.T) {
     tableName := nextIndexDBTableName()
     for i := 0; i < 5; i++ {
         var isReadOnly uint32
-        db := mustOpenIndexDB(tableName, &s, 0, &isReadOnly)
+        db := mustOpenIndexDB(tableName, &s, &isReadOnly)
         db.MustClose()
     }
     if err := os.RemoveAll(tableName); err != nil {
@@ -45,6 +45,7 @@ type Storage struct {
     tooBigTimestampRows uint64
 
     timeseriesRepopulated  uint64
+    timeseriesPreCreated   uint64
     newTimeseriesCreated   uint64
     slowRowInserts         uint64
     slowPerDayIndexInserts uint64
@@ -53,6 +54,13 @@ type Storage struct {
     hourlySeriesLimitRowsDropped uint64
     dailySeriesLimitRowsDropped  uint64
 
+    // nextRotationTimestamp is a timestamp in seconds of the next indexdb rotation.
+    //
+    // It is used for gradual pre-population of the idbNext during the last hour before the indexdb rotation.
+    // in order to reduce spikes in CPU and disk IO usage just after the rotiation.
+    // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1401
+    nextRotationTimestamp int64
+
     path           string
     cachePath      string
     retentionMsecs int64
@@ -60,8 +68,17 @@ type Storage struct {
     // lock file for exclusive access to the storage on the given path.
     flockF *os.File
 
+    // idbCurr contains the currently used indexdb.
     idbCurr atomic.Pointer[indexDB]
 
+    // idbNext is the next indexdb, which will become idbCurr at the next rotation.
+    //
+    // It is started to be gradually pre-populated with the data for active time series during the last hour
+    // before nextRotationTimestamp.
+    // This reduces spikes in CPU and disk IO usage just after the rotiation.
+    // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1401
+    idbNext atomic.Pointer[indexDB]
+
     tb *table
 
     // Series cardinality limiters.
@@ -77,7 +94,8 @@ type Storage struct {
     // metricNameCache is MetricID -> MetricName cache.
     metricNameCache *workingsetcache.Cache
 
-    // dateMetricIDCache is (Date, MetricID) cache.
+    // dateMetricIDCache is (generation, Date, MetricID) cache, where generation is the indexdb generation.
+    // See generationTSID for details.
     dateMetricIDCache *dateMetricIDCache
 
     // Fast cache for MetricID values occurred during the current hour.
@@ -213,9 +231,6 @@ func MustOpenStorage(path string, retentionMsecs int64, maxHourlySeries, maxDail
     s.currHourMetricIDs.Store(hmCurr)
     s.prevHourMetricIDs.Store(hmPrev)
 
-    date := fasttime.UnixDate()
-    nextDayMetricIDs := s.mustLoadNextDayMetricIDs(date)
-    s.nextDayMetricIDs.Store(nextDayMetricIDs)
     s.pendingNextDayMetricIDs = &uint64set.Set{}
 
     s.prefetchedMetricIDs.Store(&uint64set.Set{})
@@ -231,9 +246,23 @@ func MustOpenStorage(path string, retentionMsecs int64, maxHourlySeries, maxDail
     idbSnapshotsPath := filepath.Join(idbPath, snapshotsDirname)
     fs.MustMkdirIfNotExist(idbSnapshotsPath)
     fs.MustRemoveTemporaryDirs(idbSnapshotsPath)
-    idbCurr, idbPrev := s.mustOpenIndexDBTables(idbPath)
+    idbNext, idbCurr, idbPrev := s.mustOpenIndexDBTables(idbPath)
 
     idbCurr.SetExtDB(idbPrev)
+    idbNext.SetExtDB(idbCurr)
+
     s.idbCurr.Store(idbCurr)
+    s.idbNext.Store(idbNext)
+
+    // Initialize nextRotationTimestamp
+    nowSecs := time.Now().UnixNano() / 1e9
+    nextRotationTimestamp := nextRetentionDeadlineSeconds(nowSecs, retentionMsecs/1000, retentionTimezoneOffsetSecs)
+    atomic.StoreInt64(&s.nextRotationTimestamp, nextRotationTimestamp)
+
+    // Load nextDayMetricIDs cache
+    date := fasttime.UnixDate()
+    nextDayMetricIDs := s.mustLoadNextDayMetricIDs(idbCurr.generation, date)
+    s.nextDayMetricIDs.Store(nextDayMetricIDs)
+
     // Load deleted metricIDs from idbCurr and idbPrev
     dmisCurr, err := idbCurr.loadDeletedMetricIDs()
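The wiring above makes lookups fall through a chain of generations: idbNext consults idbCurr, which consults idbPrev. A minimal standalone sketch of that fallback idea, with simplified stand-in types rather than the real indexDB API:

package main

import "fmt"

// indexDB is a simplified stand-in: extDB points at the previous-generation
// database that is consulted when a lookup misses the current one.
type indexDB struct {
    name  string
    extDB *indexDB
    items map[string]bool
}

func (db *indexDB) has(key string) bool {
    for cur := db; cur != nil; cur = cur.extDB {
        if cur.items[key] {
            return true
        }
    }
    return false
}

func main() {
    prev := &indexDB{name: "prev", items: map[string]bool{"old_series": true}}
    curr := &indexDB{name: "curr", items: map[string]bool{}, extDB: prev} // idbCurr.SetExtDB(idbPrev)
    next := &indexDB{name: "next", items: map[string]bool{}, extDB: curr} // idbNext.SetExtDB(idbCurr)

    fmt.Println(next.has("old_series")) // true: resolved through next -> curr -> prev
}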
@@ -461,6 +490,7 @@ type Metrics struct {
     TooBigTimestampRows uint64
 
     TimeseriesRepopulated  uint64
+    TimeseriesPreCreated   uint64
     NewTimeseriesCreated   uint64
     SlowRowInserts         uint64
     SlowPerDayIndexInserts uint64
@@ -532,6 +562,7 @@ func (s *Storage) UpdateMetrics(m *Metrics) {
     m.TooBigTimestampRows += atomic.LoadUint64(&s.tooBigTimestampRows)
 
     m.TimeseriesRepopulated += atomic.LoadUint64(&s.timeseriesRepopulated)
+    m.TimeseriesPreCreated += atomic.LoadUint64(&s.timeseriesPreCreated)
     m.NewTimeseriesCreated += atomic.LoadUint64(&s.newTimeseriesCreated)
     m.SlowRowInserts += atomic.LoadUint64(&s.slowRowInserts)
     m.SlowPerDayIndexInserts += atomic.LoadUint64(&s.slowPerDayIndexInserts)
@@ -602,12 +633,20 @@ func (s *Storage) UpdateMetrics(m *Metrics) {
     m.PrefetchedMetricIDsSize += uint64(prefetchedMetricIDs.Len())
     m.PrefetchedMetricIDsSizeBytes += uint64(prefetchedMetricIDs.SizeBytes())
 
-    m.NextRetentionSeconds = uint64(nextRetentionDuration(s.retentionMsecs).Seconds())
+    d := s.nextRetentionSeconds()
+    if d < 0 {
+        d = 0
+    }
+    m.NextRetentionSeconds = uint64(d)
 
     s.idb().UpdateMetrics(&m.IndexDBMetrics)
     s.tb.UpdateMetrics(&m.TableMetrics)
 }
 
+func (s *Storage) nextRetentionSeconds() int64 {
+    return atomic.LoadInt64(&s.nextRotationTimestamp) - int64(fasttime.UnixTimestamp())
+}
+
 // SetFreeDiskSpaceLimit sets the minimum free disk space size of current storage path
 //
 // The function must be called before opening or creating any storage.
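The exported NextRetentionSeconds above is simply "scheduled rotation time minus now", clamped at zero so the unsigned gauge cannot wrap around once the deadline has passed. A tiny illustration with assumed timestamps:

package main

import "fmt"

func main() {
    nextRotationTimestamp := int64(1_700_000_000) // assumed unix time of the scheduled rotation
    now := int64(1_700_000_120)                   // the deadline passed two minutes ago

    d := nextRotationTimestamp - now // -120
    if d < 0 {
        d = 0 // avoid exporting a wrapped-around uint64 gauge value
    }
    fmt.Println(uint64(d)) // 0
}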
@@ -664,11 +703,11 @@ func (s *Storage) startRetentionWatcher() {
 
 func (s *Storage) retentionWatcher() {
     for {
-        d := nextRetentionDuration(s.retentionMsecs)
+        d := s.nextRetentionSeconds()
         select {
         case <-s.stop:
             return
-        case <-time.After(d):
+        case <-time.After(time.Second * time.Duration(d)):
             s.mustRotateIndexDB()
         }
     }
@@ -727,23 +766,29 @@ func (s *Storage) nextDayMetricIDsUpdater() {
 }
 
 func (s *Storage) mustRotateIndexDB() {
-    // Create new indexdb table.
+    // Create new indexdb table, which will be used as idbNext
     newTableName := nextIndexDBTableName()
     idbNewPath := filepath.Join(s.path, indexdbDirname, newTableName)
-    rotationTimestamp := fasttime.UnixTimestamp()
-    idbNew := mustOpenIndexDB(idbNewPath, s, rotationTimestamp, &s.isReadOnly)
+    idbNew := mustOpenIndexDB(idbNewPath, s, &s.isReadOnly)
 
-    // Drop extDB
+    // Update nextRotationTimestamp
+    atomic.AddInt64(&s.nextRotationTimestamp, s.retentionMsecs/1000)
+
+    // Set idbNext to idbNew
+    idbNext := s.idbNext.Load()
+    idbNew.SetExtDB(idbNext)
+    s.idbNext.Store(idbNew)
+
+    // Set idbCurr to idbNext
     idbCurr := s.idb()
+    s.idbCurr.Store(idbNext)
+
+    // Schedule data removal for idbPrev
     idbCurr.doExtDB(func(extDB *indexDB) {
         extDB.scheduleToDrop()
     })
     idbCurr.SetExtDB(nil)
 
-    // Start using idbNew
-    idbNew.SetExtDB(idbCurr)
-    s.idbCurr.Store(idbNew)
-
     // Persist changes on the file system.
     fs.MustSyncPath(s.path)
 
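The rotation rewritten above boils down to three pointer moves: the pre-filled idbNext is promoted to idbCurr, a freshly created table becomes the new idbNext, and the oldest generation is detached and scheduled for removal. A compressed standalone sketch of that ordering (simplified types, not the repository code):

package main

import "fmt"

type indexDB struct {
    name  string
    extDB *indexDB
}

func main() {
    prev := &indexDB{name: "gen1"}
    curr := &indexDB{name: "gen2", extDB: prev}
    next := &indexDB{name: "gen3", extDB: curr} // pre-filled during the last hour

    // Rotation: open a brand-new table for the generation after next...
    newDB := &indexDB{name: "gen4", extDB: next}
    // ...promote next to curr, and detach the oldest generation.
    oldCurr := curr
    curr, next = next, newDB
    oldCurr.extDB = nil // gen1 is scheduled for removal in the real code

    fmt.Println(curr.name, "->", curr.extDB.name, "; next:", next.name)
    // Output: gen3 -> gen2 ; next: gen4
}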
@@ -753,7 +798,7 @@ func (s *Storage) mustRotateIndexDB() {
     // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1401
 
     // Flush metric id caches for the current and the previous hour,
-    // since they may contain entries missing in idbNew.
+    // since they may contain entries missing in idbCurr after the rotation.
     // This should prevent from missing data in queries when
     // the following steps are performed for short -retentionPeriod (e.g. 1 day):
     //
@@ -763,7 +808,7 @@ func (s *Storage) mustRotateIndexDB() {
     // These series are already registered in prevHourMetricIDs, so VM doesn't add per-day entries to the current indexdb.
     // 4. Stop adding new samples for these series just before 5 UTC.
     // 5. The next indexdb rotation is performed at 4 UTC next day.
-    // The information about the series from step 3 disappears from indexdb, since the old indexdb from step 1 is deleted,
+    // The information about the series added at step 3 disappears from indexdb, since the old indexdb from step 1 is deleted,
     // while the current indexdb doesn't contain information about the series.
     // So queries for the last 24 hours stop returning samples added at step 3.
     // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2698
@@ -773,13 +818,12 @@ func (s *Storage) mustRotateIndexDB() {
     s.currHourMetricIDs.Store(&hourMetricIDs{})
     s.prevHourMetricIDs.Store(&hourMetricIDs{})
 
-    // Flush dateMetricIDCache, so idbNew can be populated with fresh data.
-    s.dateMetricIDCache.Reset()
+    // Do not flush dateMetricIDCache, since it contains entries prefixed with idb generation.
+    // There is no need in resetting nextDayMetricIDs, since it contains entries prefixed with idb generation.
 
     // Do not flush metricIDCache and metricNameCache, since all the metricIDs
     // from prev idb remain valid after the rotation.
 
-    // There is no need in resetting nextDayMetricIDs, since it should be automatically reset every day.
 }
 
 func (s *Storage) resetAndSaveTSIDCache() {
@@ -833,11 +877,14 @@ func (s *Storage) MustClose() {
     }
 }
 
-func (s *Storage) mustLoadNextDayMetricIDs(date uint64) *byDateMetricIDEntry {
+func (s *Storage) mustLoadNextDayMetricIDs(generation, date uint64) *byDateMetricIDEntry {
     e := &byDateMetricIDEntry{
-        date: date,
+        k: generationDateKey{
+            generation: generation,
+            date:       date,
+        },
     }
-    name := "next_day_metric_ids"
+    name := "next_day_metric_ids_v2"
     path := filepath.Join(s.cachePath, name)
     if !fs.IsPathExist(path) {
         return e
@@ -846,12 +893,17 @@ func (s *Storage) mustLoadNextDayMetricIDs(date uint64) *byDateMetricIDEntry {
     if err != nil {
         logger.Panicf("FATAL: cannot read %s: %s", path, err)
     }
-    if len(src) < 16 {
-        logger.Errorf("discarding %s, since it has broken header; got %d bytes; want %d bytes", path, len(src), 16)
+    if len(src) < 24 {
+        logger.Errorf("discarding %s, since it has broken header; got %d bytes; want %d bytes", path, len(src), 24)
         return e
     }
 
     // Unmarshal header
+    generationLoaded := encoding.UnmarshalUint64(src)
+    src = src[8:]
+    if generationLoaded != generation {
+        logger.Infof("discarding %s, since it contains data for stale generation; got %d; want %d", path, generationLoaded, generation)
+    }
     dateLoaded := encoding.UnmarshalUint64(src)
     src = src[8:]
     if dateLoaded != date {
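The renamed next_day_metric_ids_v2 file now carries a 24-byte header: the indexdb generation, then the date, followed by the marshaled metricID set. A standalone sketch of reading such a header with encoding/binary; the real code uses VictoriaMetrics' own encoding helpers, so the exact byte order here is only an assumption:

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    // Build a fake cache payload: 8-byte generation, 8-byte date, then the metricID set.
    src := make([]byte, 24)
    binary.BigEndian.PutUint64(src[0:8], 42)     // indexdb generation (assumed value)
    binary.BigEndian.PutUint64(src[8:16], 19600) // date as days since the Unix epoch
    // src[16:] would hold the beginning of the marshaled metricID set.

    if len(src) < 24 {
        fmt.Println("broken header, discarding cache")
        return
    }
    generation := binary.BigEndian.Uint64(src[0:8])
    date := binary.BigEndian.Uint64(src[8:16])
    fmt.Println(generation, date) // 42 19600
}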
@@ -948,12 +1000,13 @@ func (s *Storage) mustLoadHourMetricIDs(hour uint64, name string) *hourMetricIDs
 }
 
 func (s *Storage) mustSaveNextDayMetricIDs(e *byDateMetricIDEntry) {
-    name := "next_day_metric_ids"
+    name := "next_day_metric_ids_v2"
     path := filepath.Join(s.cachePath, name)
     dst := make([]byte, 0, e.v.Len()*8+16)
 
     // Marshal header
-    dst = encoding.MarshalUint64(dst, e.date)
+    dst = encoding.MarshalUint64(dst, e.k.generation)
+    dst = encoding.MarshalUint64(dst, e.k.date)
 
     // Marshal e.v
     dst = marshalUint64Set(dst, &e.v)
@@ -1072,30 +1125,31 @@ var saveCacheLock sync.Mutex
 // SetRetentionTimezoneOffset sets the offset, which is used for calculating the time for indexdb rotation.
 // See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2574
 func SetRetentionTimezoneOffset(offset time.Duration) {
-    retentionTimezoneOffsetMsecs = offset.Milliseconds()
+    retentionTimezoneOffsetSecs = int64(offset.Seconds())
 }
 
-var retentionTimezoneOffsetMsecs int64
+var retentionTimezoneOffsetSecs int64
 
-func nextRetentionDuration(retentionMsecs int64) time.Duration {
-    nowMsecs := time.Now().UnixNano() / 1e6
-    return nextRetentionDurationAt(nowMsecs, retentionMsecs)
-}
-
-func nextRetentionDurationAt(atMsecs int64, retentionMsecs int64) time.Duration {
-    // Round retentionMsecs to days. This guarantees that per-day inverted index works as expected
-    retentionMsecs = ((retentionMsecs + msecPerDay - 1) / msecPerDay) * msecPerDay
-
-    // The effect of time zone on retention period is moved out.
-    // See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2574
-    deadline := ((atMsecs + retentionMsecs + retentionTimezoneOffsetMsecs - 1) / retentionMsecs) * retentionMsecs
-
-    // Schedule the deadline to +4 hours from the next retention period start.
-    // This should prevent from possible double deletion of indexdb
-    // due to time drift - see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/248 .
-    deadline += int64(4 * 3600 * 1000)
-    deadline -= retentionTimezoneOffsetMsecs
-    return time.Duration(deadline-atMsecs) * time.Millisecond
+func nextRetentionDeadlineSeconds(atSecs, retentionSecs, offsetSecs int64) int64 {
+    // Round retentionSecs to days. This guarantees that per-day inverted index works as expected
+    const secsPerDay = 24 * 3600
+    retentionSecs = ((retentionSecs + secsPerDay - 1) / secsPerDay) * secsPerDay
+
+    // Schedule the deadline to +4 hours from the next retention period start
+    // because of historical reasons - see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/248
+    offsetSecs -= 4 * 3600
+
+    // Make sure that offsetSecs doesn't exceed retentionSecs
+    offsetSecs %= retentionSecs
+
+    // align the retention deadline to multiples of retentionSecs
+    // This makes the deadline independent of atSecs.
+    deadline := ((atSecs + offsetSecs + retentionSecs - 1) / retentionSecs) * retentionSecs
+
+    // Apply the provided offsetSecs
+    deadline -= offsetSecs
+
+    return deadline
 }
 
 // SearchMetricNames returns marshaled metric names matching the given tfss on the given tr.
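To make the new arithmetic concrete, the snippet below re-implements nextRetentionDeadlineSeconds exactly as shown in the diff and evaluates it for one assumed input: a 30-day retention, no timezone offset, and a moment in July 2023. The deadline lands on the next multiple of the day-rounded retention, shifted so rotation happens at 04:00 UTC.

package main

import (
    "fmt"
    "time"
)

// nextRetentionDeadlineSeconds mirrors the logic added in this commit.
func nextRetentionDeadlineSeconds(atSecs, retentionSecs, offsetSecs int64) int64 {
    const secsPerDay = 24 * 3600
    retentionSecs = ((retentionSecs + secsPerDay - 1) / secsPerDay) * secsPerDay

    offsetSecs -= 4 * 3600
    offsetSecs %= retentionSecs

    deadline := ((atSecs + offsetSecs + retentionSecs - 1) / retentionSecs) * retentionSecs
    deadline -= offsetSecs
    return deadline
}

func main() {
    at := time.Date(2023, 7, 20, 12, 0, 0, 0, time.UTC).Unix()
    deadline := nextRetentionDeadlineSeconds(at, 30*24*3600, 0)
    fmt.Println(time.Unix(deadline, 0).UTC()) // 2023-07-22 04:00:00 +0000 UTC
}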
@@ -1661,6 +1715,7 @@ func (s *Storage) RegisterMetricNames(qt *querytracer.Tracer, mrs []MetricRow) {
     var seriesRepopulated uint64
 
     idb := s.idb()
+    generation := idb.generation
     is := idb.getIndexSearch(0, 0, noDeadline)
     defer idb.putIndexSearch(is)
     var firstWarn error
@@ -1673,7 +1728,7 @@ func (s *Storage) RegisterMetricNames(qt *querytracer.Tracer, mrs []MetricRow) {
             // Skip row, since it exceeds cardinality limit
             continue
         }
-        if genTSID.generation != idb.generation {
+        if genTSID.generation < generation {
             // The found TSID is from the previous indexdb. Create it in the current indexdb.
 
             if err := mn.UnmarshalRaw(mr.MetricNameRaw); err != nil {
@@ -1688,7 +1743,7 @@ func (s *Storage) RegisterMetricNames(qt *querytracer.Tracer, mrs []MetricRow) {
             mn.sortTags()
 
             createAllIndexesForMetricName(is, mn, &genTSID.TSID, date)
-            genTSID.generation = idb.generation
+            genTSID.generation = generation
             s.putSeriesToCache(mr.MetricNameRaw, &genTSID, date)
             seriesRepopulated++
         }
@@ -1718,10 +1773,10 @@ func (s *Storage) RegisterMetricNames(qt *querytracer.Tracer, mrs []MetricRow) {
             continue
         }
 
-        if genTSID.generation != idb.generation {
+        if genTSID.generation < generation {
             // The found TSID is from the previous indexdb. Create it in the current indexdb.
             createAllIndexesForMetricName(is, mn, &genTSID.TSID, date)
-            genTSID.generation = idb.generation
+            genTSID.generation = generation
             seriesRepopulated++
         }
         s.putSeriesToCache(mr.MetricNameRaw, &genTSID, date)
@@ -1739,12 +1794,15 @@ func (s *Storage) RegisterMetricNames(qt *querytracer.Tracer, mrs []MetricRow) {
         // Schedule creating TSID indexes instead of creating them synchronously.
         // This should keep stable the ingestion rate when new time series are ingested.
         createAllIndexesForMetricName(is, mn, &genTSID.TSID, date)
-        genTSID.generation = idb.generation
+        genTSID.generation = generation
         s.putSeriesToCache(mr.MetricNameRaw, &genTSID, date)
     }
 
     atomic.AddUint64(&s.timeseriesRepopulated, seriesRepopulated)
 
+    // There is no need in pre-filling idbNext here, since RegisterMetricNames() is rarely called.
+    // So it is OK to register metric names in blocking manner after indexdb rotation.
+
     if firstWarn != nil {
         logger.Warnf("cannot create some metrics: %s", firstWarn)
     }
@@ -1752,6 +1810,7 @@ func (s *Storage) RegisterMetricNames(qt *querytracer.Tracer, mrs []MetricRow) {
 
 func (s *Storage) add(rows []rawRow, dstMrs []*MetricRow, mrs []MetricRow, precisionBits uint8) error {
     idb := s.idb()
+    generation := idb.generation
     is := idb.getIndexSearch(0, 0, noDeadline)
     defer idb.putIndexSearch(is)
 
@@ -1835,7 +1894,7 @@ func (s *Storage) add(rows []rawRow, dstMrs []*MetricRow, mrs []MetricRow, preci
             prevTSID = r.TSID
             prevMetricNameRaw = mr.MetricNameRaw
 
-            if genTSID.generation != idb.generation {
+            if genTSID.generation < generation {
                 // The found TSID is from the previous indexdb. Create it in the current indexdb.
                 date := uint64(r.Timestamp) / msecPerDay
 
@@ -1849,7 +1908,7 @@ func (s *Storage) add(rows []rawRow, dstMrs []*MetricRow, mrs []MetricRow, preci
                 mn.sortTags()
 
                 createAllIndexesForMetricName(is, mn, &genTSID.TSID, date)
-                genTSID.generation = idb.generation
+                genTSID.generation = generation
                 s.putSeriesToCache(mr.MetricNameRaw, &genTSID, date)
                 seriesRepopulated++
                 slowInsertsCount++
@@ -1883,10 +1942,10 @@ func (s *Storage) add(rows []rawRow, dstMrs []*MetricRow, mrs []MetricRow, preci
                 continue
             }
 
-            if genTSID.generation != idb.generation {
+            if genTSID.generation < generation {
                 // The found TSID is from the previous indexdb. Create it in the current indexdb.
                 createAllIndexesForMetricName(is, mn, &genTSID.TSID, date)
-                genTSID.generation = idb.generation
+                genTSID.generation = generation
                 seriesRepopulated++
             }
             s.putSeriesToCache(mr.MetricNameRaw, &genTSID, date)
|
||||||
}
|
}
|
||||||
|
|
||||||
createAllIndexesForMetricName(is, mn, &genTSID.TSID, date)
|
createAllIndexesForMetricName(is, mn, &genTSID.TSID, date)
|
||||||
genTSID.generation = idb.generation
|
genTSID.generation = generation
|
||||||
s.putSeriesToCache(mr.MetricNameRaw, &genTSID, date)
|
s.putSeriesToCache(mr.MetricNameRaw, &genTSID, date)
|
||||||
newSeriesCount++
|
newSeriesCount++
|
||||||
|
|
||||||
|
@@ -1924,11 +1983,17 @@ func (s *Storage) add(rows []rawRow, dstMrs []*MetricRow, mrs []MetricRow, preci
     atomic.AddUint64(&s.newTimeseriesCreated, newSeriesCount)
     atomic.AddUint64(&s.timeseriesRepopulated, seriesRepopulated)
 
+    dstMrs = dstMrs[:j]
+    rows = rows[:j]
+
+    if err := s.prefillNextIndexDB(rows, dstMrs); err != nil {
+        if firstWarn == nil {
+            firstWarn = err
+        }
+    }
     if firstWarn != nil {
         storageAddRowsLogger.Warnf("warn occurred during rows addition: %s", firstWarn)
     }
-    dstMrs = dstMrs[:j]
-    rows = rows[:j]
 
     err := s.updatePerDateData(rows, dstMrs)
     if err != nil {
|
||||||
// so future rows for that TSID are ingested via fast path.
|
// so future rows for that TSID are ingested via fast path.
|
||||||
s.putTSIDToCache(genTSID, metricNameRaw)
|
s.putTSIDToCache(genTSID, metricNameRaw)
|
||||||
|
|
||||||
// Register the (date, metricID) entry in the cache,
|
// Register the (generation, date, metricID) entry in the cache,
|
||||||
// so next time the entry is found there instead of searching for it in the indexdb.
|
// so next time the entry is found there instead of searching for it in the indexdb.
|
||||||
s.dateMetricIDCache.Set(date, genTSID.TSID.MetricID)
|
s.dateMetricIDCache.Set(genTSID.generation, date, genTSID.TSID.MetricID)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Storage) registerSeriesCardinality(metricID uint64, metricNameRaw []byte) bool {
|
func (s *Storage) registerSeriesCardinality(metricID uint64, metricNameRaw []byte) bool {
|
||||||
|
@@ -2004,6 +2069,77 @@ func getUserReadableMetricName(metricNameRaw []byte) string {
     return mn.String()
 }
 
+func (s *Storage) prefillNextIndexDB(rows []rawRow, mrs []*MetricRow) error {
+    d := s.nextRetentionSeconds()
+    if d >= 3600 {
+        // Fast path: nothing to pre-fill because it is too early.
+        // The pre-fill is started during the last hour before the indexdb rotation.
+        return nil
+    }
+
+    // Slower path: less than hour left for the next indexdb rotation.
+    // Pre-populate idbNext with the increasing probability until the rotation.
+    // The probability increases from 0% to 100% proportioinally to d=[3600 .. 0].
+    pMin := float64(d) / 3600
+
+    idbNext := s.idbNext.Load()
+    generation := idbNext.generation
+    isNext := idbNext.getIndexSearch(0, 0, noDeadline)
+    defer idbNext.putIndexSearch(isNext)
+
+    var firstError error
+    var genTSID generationTSID
+    mn := GetMetricName()
+    defer PutMetricName(mn)
+
+    timeseriesPreCreated := uint64(0)
+    for i := range rows {
+        r := &rows[i]
+        p := float64(uint32(fastHashUint64(r.TSID.MetricID))) / (1 << 32)
+        if p < pMin {
+            // Fast path: it is too early to pre-fill indexes for the given MetricID.
+            continue
+        }
+
+        // Check whether the given MetricID is already present in dateMetricIDCache.
+        date := uint64(r.Timestamp) / msecPerDay
+        metricID := r.TSID.MetricID
+        if s.dateMetricIDCache.Has(generation, date, metricID) {
+            // Indexes are already pre-filled.
+            continue
+        }
+
+        // Check whether the given (date, metricID) is already present in idbNext.
+        if isNext.hasDateMetricIDNoExtDB(date, metricID, r.TSID.AccountID, r.TSID.ProjectID) {
+            // Indexes are already pre-filled at idbNext.
+            //
+            // Register the (generation, date, metricID) entry in the cache,
+            // so next time the entry is found there instead of searching for it in the indexdb.
+            s.dateMetricIDCache.Set(generation, date, metricID)
+            continue
+        }
+
+        // Slow path: pre-fill indexes in idbNext.
+        metricNameRaw := mrs[i].MetricNameRaw
+        if err := mn.UnmarshalRaw(metricNameRaw); err != nil {
+            if firstError == nil {
+                firstError = fmt.Errorf("cannot unmarshal MetricNameRaw %q: %w", metricNameRaw, err)
+            }
+            continue
+        }
+        mn.sortTags()
+
+        createAllIndexesForMetricName(isNext, mn, &r.TSID, date)
+        genTSID.TSID = r.TSID
+        genTSID.generation = generation
+        s.putSeriesToCache(metricNameRaw, &genTSID, date)
+        timeseriesPreCreated++
+    }
+    atomic.AddUint64(&s.timeseriesPreCreated, timeseriesPreCreated)
+
+    return firstError
+}
+
 func (s *Storage) updatePerDateData(rows []rawRow, mrs []*MetricRow) error {
     var date uint64
     var hour uint64
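The sampling rule in prefillNextIndexDB hashes the MetricID into [0, 1) and compares it against a threshold that shrinks from 1 to 0 over the final hour, so every active series gets pre-created exactly once, at a pseudo-random moment within that hour. A standalone sketch of the rule; mix64 is an arbitrary stand-in mixer, not the repository's fastHashUint64:

package main

import "fmt"

// mix64 is a stand-in 64-bit mixer (splitmix64 finalizer).
func mix64(x uint64) uint64 {
    x ^= x >> 30
    x *= 0xbf58476d1ce4e5b9
    x ^= x >> 27
    x *= 0x94d049bb133111eb
    x ^= x >> 31
    return x
}

// shouldPrefill reports whether a series should be pre-created in the next indexdb
// when d seconds remain until the rotation (pre-fill starts at d < 3600).
func shouldPrefill(metricID uint64, d int64) bool {
    if d >= 3600 {
        return false // too early
    }
    pMin := float64(d) / 3600 // shrinks from 1 to 0 during the last hour
    p := float64(uint32(mix64(metricID))) / (1 << 32)
    return p >= pMin
}

func main() {
    const metricID = 123456789
    for _, d := range []int64{7200, 3000, 1800, 600, 0} {
        fmt.Println(d, shouldPrefill(metricID, d))
    }
}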
@@ -2014,6 +2150,10 @@ func (s *Storage) updatePerDateData(rows []rawRow, mrs []*MetricRow) error {
         prevDate     uint64
         prevMetricID uint64
     )
+
+    idb := s.idb()
+    generation := idb.generation
+
     hm := s.currHourMetricIDs.Load()
     hmPrev := s.prevHourMetricIDs.Load()
     hmPrevDate := hmPrev.hour / 24
@@ -2079,8 +2219,8 @@ func (s *Storage) updatePerDateData(rows []rawRow, mrs []*MetricRow) error {
             }
         }
 
-        // Slower path: check global cache for (date, metricID) entry.
-        if s.dateMetricIDCache.Has(date, metricID) {
+        // Slower path: check global cache for (generation, date, metricID) entry.
+        if s.dateMetricIDCache.Has(generation, date, metricID) {
             continue
         }
         // Slow path: store the (date, metricID) entry in the indexDB.
@@ -2124,7 +2264,6 @@ func (s *Storage) updatePerDateData(rows []rawRow, mrs []*MetricRow) error {
         return a.tsid.MetricID < b.tsid.MetricID
     })
 
-    idb := s.idb()
     is := idb.getIndexSearch(0, 0, noDeadline)
     defer idb.putIndexSearch(is)
 
@@ -2154,7 +2293,7 @@ func (s *Storage) updatePerDateData(rows []rawRow, mrs []*MetricRow) error {
     }
     PutMetricName(mn)
     // The (date, metricID) entries must be added to cache only after they have been successfully added to indexDB.
-    s.dateMetricIDCache.Store(dateMetricIDsForCache)
+    s.dateMetricIDCache.Store(generation, dateMetricIDsForCache)
     return firstError
 }
 
@@ -2188,12 +2327,6 @@ func newDateMetricIDCache() *dateMetricIDCache {
     return &dmc
 }
 
-func (dmc *dateMetricIDCache) Reset() {
-    dmc.mu.Lock()
-    dmc.resetLocked()
-    dmc.mu.Unlock()
-}
-
 func (dmc *dateMetricIDCache) resetLocked() {
     // Do not reset syncsCount and resetsCount
     dmc.byDate.Store(newByDateMetricIDMap())
@@ -2221,9 +2354,9 @@ func (dmc *dateMetricIDCache) SizeBytes() uint64 {
     return n
 }
 
-func (dmc *dateMetricIDCache) Has(date, metricID uint64) bool {
+func (dmc *dateMetricIDCache) Has(generation, date, metricID uint64) bool {
     byDate := dmc.byDate.Load()
-    v := byDate.get(date)
+    v := byDate.get(generation, date)
     if v.Has(metricID) {
         // Fast path.
         // The majority of calls must go here.
@@ -2232,7 +2365,7 @@ func (dmc *dateMetricIDCache) Has(date, metricID uint64) bool {
 
     // Slow path. Check mutable map.
     dmc.mu.Lock()
-    v = dmc.byDateMutable.get(date)
+    v = dmc.byDateMutable.get(generation, date)
     ok := v.Has(metricID)
     dmc.syncLockedIfNeeded()
     dmc.mu.Unlock()
@@ -2245,7 +2378,7 @@ type dateMetricID struct {
     metricID uint64
 }
 
-func (dmc *dateMetricIDCache) Store(dmids []dateMetricID) {
+func (dmc *dateMetricIDCache) Store(generation uint64, dmids []dateMetricID) {
     var prevDate uint64
     metricIDs := make([]uint64, 0, len(dmids))
     dmc.mu.Lock()
@@ -2255,22 +2388,22 @@ func (dmc *dateMetricIDCache) Store(dmids []dateMetricID) {
             continue
         }
         if len(metricIDs) > 0 {
-            v := dmc.byDateMutable.getOrCreate(prevDate)
+            v := dmc.byDateMutable.getOrCreate(generation, prevDate)
             v.AddMulti(metricIDs)
         }
         metricIDs = append(metricIDs[:0], dmid.metricID)
         prevDate = dmid.date
     }
     if len(metricIDs) > 0 {
-        v := dmc.byDateMutable.getOrCreate(prevDate)
+        v := dmc.byDateMutable.getOrCreate(generation, prevDate)
         v.AddMulti(metricIDs)
     }
     dmc.mu.Unlock()
 }
 
-func (dmc *dateMetricIDCache) Set(date, metricID uint64) {
+func (dmc *dateMetricIDCache) Set(generation, date, metricID uint64) {
     dmc.mu.Lock()
-    v := dmc.byDateMutable.getOrCreate(date)
+    v := dmc.byDateMutable.getOrCreate(generation, date)
     v.Add(metricID)
     dmc.mu.Unlock()
 }
@@ -2288,31 +2421,38 @@ func (dmc *dateMetricIDCache) syncLocked() {
         // Nothing to sync.
         return
     }
 
+    // Merge data from byDate into byDateMutable and then atomically replace byDate with the merged data.
     byDate := dmc.byDate.Load()
     byDateMutable := dmc.byDateMutable
-    for date, e := range byDateMutable.m {
-        v := byDate.get(date)
+    for k, e := range byDateMutable.m {
+        v := byDate.get(k.generation, k.date)
         if v == nil {
+            // Nothing to merge
            continue
        }
        v = v.Clone()
        v.Union(&e.v)
        dme := &byDateMetricIDEntry{
-            date: date,
+            k: k,
            v: *v,
        }
-        if date == byDateMutable.hotEntry.Load().date {
+        byDateMutable.m[k] = dme
+        he := byDateMutable.hotEntry.Load()
+        if he.k == k {
            byDateMutable.hotEntry.Store(dme)
        }
-        byDateMutable.m[date] = dme
    }
-    for date, e := range byDate.m {
-        v := byDateMutable.get(date)
+    // Copy entries from byDate, which are missing in byDateMutable
+    for k, e := range byDate.m {
+        v := byDateMutable.get(k.generation, k.date)
        if v != nil {
            continue
        }
-        byDateMutable.m[date] = e
+        byDateMutable.m[k] = e
    }
 
+    // Atomically replace byDate with byDateMutable
     dmc.byDate.Store(dmc.byDateMutable)
     dmc.byDateMutable = newByDateMetricIDMap()
 
@@ -2325,25 +2465,34 @@ func (dmc *dateMetricIDCache) syncLocked() {
 
 type byDateMetricIDMap struct {
     hotEntry atomic.Pointer[byDateMetricIDEntry]
-    m        map[uint64]*byDateMetricIDEntry
+    m        map[generationDateKey]*byDateMetricIDEntry
+}
+
+type generationDateKey struct {
+    generation uint64
+    date       uint64
 }
 
 func newByDateMetricIDMap() *byDateMetricIDMap {
     dmm := &byDateMetricIDMap{
-        m: make(map[uint64]*byDateMetricIDEntry),
+        m: make(map[generationDateKey]*byDateMetricIDEntry),
     }
     dmm.hotEntry.Store(&byDateMetricIDEntry{})
     return dmm
 }
 
-func (dmm *byDateMetricIDMap) get(date uint64) *uint64set.Set {
+func (dmm *byDateMetricIDMap) get(generation, date uint64) *uint64set.Set {
     hotEntry := dmm.hotEntry.Load()
-    if hotEntry.date == date {
+    if hotEntry.k.generation == generation && hotEntry.k.date == date {
         // Fast path
         return &hotEntry.v
     }
     // Slow path
-    e := dmm.m[date]
+    k := generationDateKey{
+        generation: generation,
+        date:       date,
+    }
+    e := dmm.m[k]
     if e == nil {
         return nil
     }
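The map above is keyed by (generation, date), and a hotEntry pointer short-circuits the common case of repeated lookups for the same key. A condensed standalone sketch of that pattern (simplified and single-goroutine, not the real cache):

package main

import (
    "fmt"
    "sync/atomic"
)

type key struct{ generation, date uint64 }

type entry struct {
    k   key
    ids map[uint64]bool
}

type cache struct {
    hot atomic.Pointer[entry]
    m   map[key]*entry
}

func (c *cache) get(k key) *entry {
    if e := c.hot.Load(); e != nil && e.k == k {
        return e // fast path: no map lookup
    }
    e := c.m[k] // slow path
    if e != nil {
        c.hot.Store(e)
    }
    return e
}

func main() {
    c := &cache{m: map[key]*entry{}}
    k := key{generation: 7, date: 19600}
    c.m[k] = &entry{k: k, ids: map[uint64]bool{42: true}}
    fmt.Println(c.get(k).ids[42], c.get(k) == c.hot.Load()) // true true
}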
@@ -2351,36 +2500,41 @@ func (dmm *byDateMetricIDMap) get(date uint64) *uint64set.Set {
     return &e.v
 }
 
-func (dmm *byDateMetricIDMap) getOrCreate(date uint64) *uint64set.Set {
-    v := dmm.get(date)
+func (dmm *byDateMetricIDMap) getOrCreate(generation, date uint64) *uint64set.Set {
+    v := dmm.get(generation, date)
     if v != nil {
         return v
     }
-    e := &byDateMetricIDEntry{
-        date: date,
+    k := generationDateKey{
+        generation: generation,
+        date:       date,
     }
-    dmm.m[date] = e
+    e := &byDateMetricIDEntry{
+        k: k,
+    }
+    dmm.m[k] = e
     return &e.v
 }
 
 type byDateMetricIDEntry struct {
-    date uint64
+    k generationDateKey
     v uint64set.Set
 }
 
 func (s *Storage) updateNextDayMetricIDs(date uint64) {
+    generation := s.idb().generation
     e := s.nextDayMetricIDs.Load()
     s.pendingNextDayMetricIDsLock.Lock()
     pendingMetricIDs := s.pendingNextDayMetricIDs
     s.pendingNextDayMetricIDs = &uint64set.Set{}
     s.pendingNextDayMetricIDsLock.Unlock()
-    if pendingMetricIDs.Len() == 0 && e.date == date {
+    if pendingMetricIDs.Len() == 0 && e.k.generation == generation && e.k.date == date {
         // Fast path: nothing to update.
         return
     }
 
     // Slow path: union pendingMetricIDs with e.v
-    if e.date == date {
+    if e.k.generation == generation && e.k.date == date {
         pendingMetricIDs.Union(&e.v)
     } else {
         // Do not add pendingMetricIDs from the previous day to the current day,
@@ -2388,9 +2542,13 @@ func (s *Storage) updateNextDayMetricIDs(date uint64) {
         // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3309
         pendingMetricIDs = &uint64set.Set{}
     }
+    k := generationDateKey{
+        generation: generation,
+        date:       date,
+    }
     eNew := &byDateMetricIDEntry{
-        date: date,
+        k: k,
         v: *pendingMetricIDs,
     }
     s.nextDayMetricIDs.Store(eNew)
 }
@@ -2486,12 +2644,11 @@ func (s *Storage) putTSIDToCache(tsid *generationTSID, metricName []byte) {
     s.tsidCache.Set(metricName, buf)
 }
 
-func (s *Storage) mustOpenIndexDBTables(path string) (curr, prev *indexDB) {
+func (s *Storage) mustOpenIndexDBTables(path string) (next, curr, prev *indexDB) {
     fs.MustMkdirIfNotExist(path)
     fs.MustRemoveTemporaryDirs(path)
 
-    // Search for the two most recent tables - the last one is active,
-    // the previous one contains backup data.
+    // Search for the three most recent tables - the prev, curr and next.
     des := fs.MustReadDir(path)
     var tableNames []string
     for _, de := range des {
@ -2509,37 +2666,42 @@ func (s *Storage) mustOpenIndexDBTables(path string) (curr, prev *indexDB) {
|
||||||
sort.Slice(tableNames, func(i, j int) bool {
|
sort.Slice(tableNames, func(i, j int) bool {
|
||||||
return tableNames[i] < tableNames[j]
|
return tableNames[i] < tableNames[j]
|
||||||
})
|
})
|
||||||
if len(tableNames) < 2 {
|
switch len(tableNames) {
|
||||||
// Create missing tables
|
case 0:
|
||||||
if len(tableNames) == 0 {
|
prevName := nextIndexDBTableName()
|
||||||
prevName := nextIndexDBTableName()
|
|
||||||
tableNames = append(tableNames, prevName)
|
|
||||||
}
|
|
||||||
currName := nextIndexDBTableName()
|
currName := nextIndexDBTableName()
|
||||||
tableNames = append(tableNames, currName)
|
nextName := nextIndexDBTableName()
|
||||||
|
tableNames = append(tableNames, prevName, currName, nextName)
|
||||||
|
case 1:
|
||||||
|
currName := nextIndexDBTableName()
|
||||||
|
nextName := nextIndexDBTableName()
|
||||||
|
tableNames = append(tableNames, currName, nextName)
|
||||||
|
case 2:
|
||||||
|
nextName := nextIndexDBTableName()
|
||||||
|
tableNames = append(tableNames, nextName)
|
||||||
|
default:
|
||||||
|
// Remove all the tables except the last three tables.
|
||||||
|
for _, tn := range tableNames[:len(tableNames)-3] {
|
||||||
|
pathToRemove := filepath.Join(path, tn)
|
||||||
|
logger.Infof("removing obsolete indexdb dir %q...", pathToRemove)
|
||||||
|
fs.MustRemoveAll(pathToRemove)
|
||||||
|
logger.Infof("removed obsolete indexdb dir %q", pathToRemove)
|
||||||
|
}
|
||||||
|
fs.MustSyncPath(path)
|
||||||
|
|
||||||
|
tableNames = tableNames[len(tableNames)-3:]
|
||||||
}
|
}
|
||||||
|
|
||||||
// Invariant: len(tableNames) >= 2
|
// Open tables
|
||||||
|
nextPath := filepath.Join(path, tableNames[2])
|
||||||
|
currPath := filepath.Join(path, tableNames[1])
|
||||||
|
prevPath := filepath.Join(path, tableNames[0])
|
||||||
|
|
||||||
// Remove all the tables except two last tables.
|
next = mustOpenIndexDB(nextPath, s, &s.isReadOnly)
|
||||||
for _, tn := range tableNames[:len(tableNames)-2] {
|
curr = mustOpenIndexDB(currPath, s, &s.isReadOnly)
|
||||||
pathToRemove := filepath.Join(path, tn)
|
prev = mustOpenIndexDB(prevPath, s, &s.isReadOnly)
|
||||||
logger.Infof("removing obsolete indexdb dir %q...", pathToRemove)
|
|
||||||
fs.MustRemoveAll(pathToRemove)
|
|
||||||
logger.Infof("removed obsolete indexdb dir %q", pathToRemove)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Persist changes on the file system.
|
return next, curr, prev
|
||||||
fs.MustSyncPath(path)
|
|
||||||
|
|
||||||
// Open the last two tables.
|
|
||||||
currPath := filepath.Join(path, tableNames[len(tableNames)-1])
|
|
||||||
|
|
||||||
curr = mustOpenIndexDB(currPath, s, 0, &s.isReadOnly)
|
|
||||||
prevPath := filepath.Join(path, tableNames[len(tableNames)-2])
|
|
||||||
prev = mustOpenIndexDB(prevPath, s, 0, &s.isReadOnly)
|
|
||||||
|
|
||||||
return curr, prev
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var indexDBTableNameRegexp = regexp.MustCompile("^[0-9A-F]{16}$")
|
var indexDBTableNameRegexp = regexp.MustCompile("^[0-9A-F]{16}$")
|
||||||
|
|
|
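For readers skimming the mustOpenIndexDBTables change above: the function now keeps the three lexicographically largest indexdb directories and treats them, in ascending order, as prev, curr and next, generating missing names when fewer than three exist and removing anything older. A standalone sketch of that selection rule follows; pickIndexDBTables and the name generator are illustrative stand-ins, not the VictoriaMetrics API, and the real directory removal is omitted:

package main

import (
    "fmt"
    "sort"
)

// pickIndexDBTables sketches the rule from the hunk above: sort the existing
// table names, append freshly generated names until at least three exist
// (generated names are assumed to sort after existing ones, as time-based
// names do), then keep only the last three as prev, curr and next.
func pickIndexDBTables(names []string, newName func() string) (prev, curr, next string) {
    sort.Strings(names)
    for len(names) < 3 {
        names = append(names, newName())
    }
    names = names[len(names)-3:] // the real code also deletes the dropped directories
    return names[0], names[1], names[2]
}

func main() {
    base := uint64(0x17E0000000000000)
    n := uint64(1)
    gen := func() string { n++; return fmt.Sprintf("%016X", base+n) }
    prev, curr, next := pickIndexDBTables([]string{fmt.Sprintf("%016X", base+1)}, gen)
    fmt.Println(prev, curr, next) // 17E0000000000001 17E0000000000002 17E0000000000003
}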
@@ -90,23 +90,25 @@ func TestDateMetricIDCacheConcurrent(t *testing.T) {
 
 func testDateMetricIDCache(c *dateMetricIDCache, concurrent bool) error {
     type dmk struct {
-        date     uint64
-        metricID uint64
+        generation uint64
+        date       uint64
+        metricID   uint64
     }
     m := make(map[dmk]bool)
     for i := 0; i < 1e5; i++ {
+        generation := uint64(i) % 2
         date := uint64(i) % 3
         metricID := uint64(i) % 1237
-        if !concurrent && c.Has(date, metricID) {
-            if !m[dmk{date, metricID}] {
-                return fmt.Errorf("c.Has(%d, %d) must return false, but returned true", date, metricID)
+        if !concurrent && c.Has(generation, date, metricID) {
+            if !m[dmk{generation, date, metricID}] {
+                return fmt.Errorf("c.Has(%d, %d, %d) must return false, but returned true", generation, date, metricID)
             }
             continue
         }
-        c.Set(date, metricID)
-        m[dmk{date, metricID}] = true
-        if !concurrent && !c.Has(date, metricID) {
-            return fmt.Errorf("c.Has(%d, %d) must return true, but returned false", date, metricID)
+        c.Set(generation, date, metricID)
+        m[dmk{generation, date, metricID}] = true
+        if !concurrent && !c.Has(generation, date, metricID) {
+            return fmt.Errorf("c.Has(%d, %d, %d) must return true, but returned false", generation, date, metricID)
         }
         if i%11234 == 0 {
             c.mu.Lock()
@@ -114,25 +116,29 @@ func testDateMetricIDCache(c *dateMetricIDCache, concurrent bool) error {
             c.mu.Unlock()
         }
         if i%34323 == 0 {
-            c.Reset()
+            c.mu.Lock()
+            c.resetLocked()
+            c.mu.Unlock()
             m = make(map[dmk]bool)
         }
     }
 
     // Verify fast path after sync.
     for i := 0; i < 1e5; i++ {
+        generation := uint64(i) % 2
         date := uint64(i) % 3
         metricID := uint64(i) % 123
-        c.Set(date, metricID)
+        c.Set(generation, date, metricID)
     }
     c.mu.Lock()
     c.syncLocked()
     c.mu.Unlock()
     for i := 0; i < 1e5; i++ {
+        generation := uint64(i) % 2
         date := uint64(i) % 3
         metricID := uint64(i) % 123
-        if !concurrent && !c.Has(date, metricID) {
-            return fmt.Errorf("c.Has(%d, %d) must return true after sync", date, metricID)
+        if !concurrent && !c.Has(generation, date, metricID) {
+            return fmt.Errorf("c.Has(%d, %d, %d) must return true after sync", generation, date, metricID)
         }
     }
 
@@ -140,7 +146,9 @@ func testDateMetricIDCache(c *dateMetricIDCache, concurrent bool) error {
     if n := c.EntriesCount(); !concurrent && n < 123 {
         return fmt.Errorf("c.EntriesCount must return at least 123; returned %d", n)
    }
-    c.Reset()
+    c.mu.Lock()
+    c.resetLocked()
+    c.mu.Unlock()
     if n := c.EntriesCount(); !concurrent && n > 0 {
         return fmt.Errorf("c.EntriesCount must return 0 after reset; returned %d", n)
     }
 
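The test changes above encode the new dateMetricIDCache contract: Has and Set now take the indexDB generation in front of (date, metricID), and an entry stored under one generation is not visible under another. Below is a rough map-based model of that contract, useful for following the assertions in the test; the real cache is a synchronized two-level structure, not a plain map:

package main

import "fmt"

type key struct {
    generation uint64
    date       uint64
    metricID   uint64
}

// dateMetricIDModel mimics only the observable behaviour the test relies on:
// membership is keyed by (generation, date, metricID).
type dateMetricIDModel map[key]bool

func (m dateMetricIDModel) Set(generation, date, metricID uint64) {
    m[key{generation, date, metricID}] = true
}

func (m dateMetricIDModel) Has(generation, date, metricID uint64) bool {
    return m[key{generation, date, metricID}]
}

func main() {
    m := dateMetricIDModel{}
    m.Set(1, 19560, 42)              // generation 1, an arbitrary date bucket, metricID 42
    fmt.Println(m.Has(1, 19560, 42)) // true
    fmt.Println(m.Has(2, 19560, 42)) // false: a different indexDB generation
}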
@@ -493,45 +501,53 @@ func TestMetricRowMarshalUnmarshal(t *testing.T) {
     }
 }
 
-func TestNextRetentionDuration(t *testing.T) {
-    validateRetention := func(retention int64) {
+func TestNextRetentionDeadlineSeconds(t *testing.T) {
+    f := func(currentTime string, retention, offset time.Duration, deadlineExpected string) {
         t.Helper()
-        validateRetentionAt := func(now time.Time, retention int64) {
-            nowMsecs := now.UnixMilli()
-            d := nextRetentionDurationAt(nowMsecs, retention)
-            if d <= 0 {
-                nextTime := now.Add(d)
-                retentionHuman := time.Duration(retention) * time.Millisecond
-                t.Errorf("unexpected retention duration for retention=%s; got %s(%s); must be %s + %s; offset: %s", retentionHuman, nextTime, d, now, retentionHuman, time.Duration(retentionTimezoneOffsetMsecs)*time.Millisecond)
-            }
+
+        now, err := time.Parse(time.RFC3339, currentTime)
+        if err != nil {
+            t.Fatalf("cannot parse currentTime=%q: %s", currentTime, err)
         }
 
-        // UTC offsets are in range [-12 hours, +14 hours].
-        // Verify that any legit combination of retention timezone and local time
-        // will return valid retention duration.
-        // See: https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4207
-        for retentionOffset := -12; retentionOffset <= 14; retentionOffset++ {
-            SetRetentionTimezoneOffset(time.Duration(retentionOffset) * time.Hour)
-            validateRetentionAt(time.Now().UTC(), retention)
-
-            now := time.Date(2023, 4, 27, 23, 58, 0, 0, time.UTC)
-            validateRetentionAt(now, retention)
-
-            now = time.Date(2023, 4, 27, 0, 1, 0, 0, time.UTC)
-            validateRetentionAt(now, retention)
-
-            now = time.Date(2023, 4, 27, 0, 0, 0, 0, time.UTC)
-            validateRetentionAt(now, retention)
+        d := nextRetentionDeadlineSeconds(now.Unix(), int64(retention.Seconds()), int64(offset.Seconds()))
+        deadline := time.Unix(d, 0).UTC().Format(time.RFC3339)
+        if deadline != deadlineExpected {
+            t.Fatalf("unexpected deadline; got %s; want %s", deadline, deadlineExpected)
         }
     }
 
-    for retentionDays := 0.3; retentionDays < 3; retentionDays += 0.3 {
-        validateRetention(int64(retentionDays * msecPerDay))
-    }
-
-    for retentionMonths := float64(0.1); retentionMonths < 120; retentionMonths += 0.3 {
-        validateRetention(int64(retentionMonths * msecsPerMonth))
-    }
+    f("2023-07-22T12:44:35Z", 24*time.Hour, 0, "2023-07-23T04:00:00Z")
+    f("2023-07-22T03:44:35Z", 24*time.Hour, 0, "2023-07-22T04:00:00Z")
+    f("2023-07-22T04:44:35Z", 24*time.Hour, 0, "2023-07-23T04:00:00Z")
+    f("2023-07-22T23:44:35Z", 24*time.Hour, 0, "2023-07-23T04:00:00Z")
+    f("2023-07-23T03:59:35Z", 24*time.Hour, 0, "2023-07-23T04:00:00Z")
+
+    f("2023-07-22T12:44:35Z", 24*time.Hour, 2*time.Hour, "2023-07-23T02:00:00Z")
+    f("2023-07-22T01:44:35Z", 24*time.Hour, 2*time.Hour, "2023-07-22T02:00:00Z")
+    f("2023-07-22T02:44:35Z", 24*time.Hour, 2*time.Hour, "2023-07-23T02:00:00Z")
+    f("2023-07-22T23:44:35Z", 24*time.Hour, 2*time.Hour, "2023-07-23T02:00:00Z")
+    f("2023-07-23T01:59:35Z", 24*time.Hour, 2*time.Hour, "2023-07-23T02:00:00Z")
+
+    f("2023-07-22T12:44:35Z", 24*time.Hour, -5*time.Hour, "2023-07-23T09:00:00Z")
+    f("2023-07-22T08:44:35Z", 24*time.Hour, -5*time.Hour, "2023-07-22T09:00:00Z")
+    f("2023-07-22T09:44:35Z", 24*time.Hour, -5*time.Hour, "2023-07-23T09:00:00Z")
+
+    f("2023-07-22T12:44:35Z", 24*time.Hour, -12*time.Hour, "2023-07-22T16:00:00Z")
+    f("2023-07-22T15:44:35Z", 24*time.Hour, -12*time.Hour, "2023-07-22T16:00:00Z")
+    f("2023-07-22T16:44:35Z", 24*time.Hour, -12*time.Hour, "2023-07-23T16:00:00Z")
+
+    f("2023-07-22T12:44:35Z", 24*time.Hour, -18*time.Hour, "2023-07-22T22:00:00Z")
+    f("2023-07-22T21:44:35Z", 24*time.Hour, -18*time.Hour, "2023-07-22T22:00:00Z")
+    f("2023-07-22T22:44:35Z", 24*time.Hour, -18*time.Hour, "2023-07-23T22:00:00Z")
+
+    f("2023-07-22T12:44:35Z", 24*time.Hour, 18*time.Hour, "2023-07-23T10:00:00Z")
+    f("2023-07-22T09:44:35Z", 24*time.Hour, 18*time.Hour, "2023-07-22T10:00:00Z")
+    f("2023-07-22T10:44:35Z", 24*time.Hour, 18*time.Hour, "2023-07-23T10:00:00Z")
+
+    f("2023-07-22T12:44:35Z", 24*time.Hour, 37*time.Hour, "2023-07-22T15:00:00Z")
+    f("2023-07-22T14:44:35Z", 24*time.Hour, 37*time.Hour, "2023-07-22T15:00:00Z")
+    f("2023-07-22T15:44:35Z", 24*time.Hour, 37*time.Hour, "2023-07-23T15:00:00Z")
 }
 
 func TestStorageOpenClose(t *testing.T) {
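The expected values in the new test above follow a single pattern: with a 24h retention the deadline is the first 04:00 UTC boundary after the given time, shifted back by the configured offset. The snippet below is a reader's model that reproduces the table; it is not the implementation of nextRetentionDeadlineSeconds, only the behaviour the test pins down:

package main

import (
    "fmt"
    "time"
)

// nextDeadlineModel returns the first instant after "at" that lies on a
// retention boundary, where boundaries repeat every "retention" and are
// anchored at 04:00 UTC shifted back by "offset" (the pattern in the test table).
func nextDeadlineModel(at time.Time, retention, offset time.Duration) time.Time {
    anchor := time.Date(1970, 1, 1, 4, 0, 0, 0, time.UTC).Add(-offset)
    n := int64(at.Sub(anchor)/retention) + 1
    return anchor.Add(time.Duration(n) * retention)
}

func main() {
    at, _ := time.Parse(time.RFC3339, "2023-07-22T12:44:35Z")
    fmt.Println(nextDeadlineModel(at, 24*time.Hour, 0).Format(time.RFC3339))            // 2023-07-23T04:00:00Z
    fmt.Println(nextDeadlineModel(at, 24*time.Hour, 2*time.Hour).Format(time.RFC3339))  // 2023-07-23T02:00:00Z
    fmt.Println(nextDeadlineModel(at, 24*time.Hour, -5*time.Hour).Format(time.RFC3339)) // 2023-07-23T09:00:00Z
}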