Merge branch 'master' into max-labels-per-timeseries

commit f371f57474
Author: Andrii Chubatiuk
Date: 2024-11-16 09:11:24 +02:00
24 changed files with 220 additions and 69 deletions


@@ -484,14 +484,16 @@ func tryPush(at *auth.Token, wr *prompbmarshal.WriteRequest, forceDropSamplesOnF
 			rowsCountAfterRelabel := getRowsCount(tssBlock)
 			rowsDroppedByGlobalRelabel.Add(rowsCountBeforeRelabel - rowsCountAfterRelabel)
 		}
-		idxDiff := 0
-		for i, ts := range tssBlock {
-			if storagelimits.ExceedingLabels(ts.Labels) {
-				idxDiff++
-			} else if idxDiff > 0 {
-				tss[i-idxDiff] = tss[i]
+		tssBlockTmp := tssBlock[:0]
+		for _, ts := range tssBlock {
+			if !storagelimits.ExceedingLabels(ts.Labels) {
+				tssBlockTmp = append(tssBlockTmp, ts)
 			}
 		}
+		tssBlock = tssBlockTmp
+		if len(tssBlock) == 0 {
+			continue
+		}
 		sortLabelsIfNeeded(tssBlock)
 		tssBlock = limitSeriesCardinality(tssBlock)
 		if sas.IsEnabled() {


@@ -62,9 +62,6 @@ func (ctx *InsertCtx) marshalMetricNameRaw(prefix []byte, labels []prompbmarshal
 // WriteDataPoint writes (timestamp, value) with the given prefix and labels into ctx buffer.
 func (ctx *InsertCtx) WriteDataPoint(prefix []byte, labels []prompbmarshal.Label, timestamp int64, value float64) error {
-	if storagelimits.ExceedingLabels(labels) {
-		return nil
-	}
 	metricNameRaw := ctx.marshalMetricNameRaw(prefix, labels)
 	return ctx.addRow(metricNameRaw, timestamp, value)
 }
@@ -73,9 +70,6 @@ func (ctx *InsertCtx) WriteDataPoint(prefix []byte, labels []prompbmarshal.Label
 //
 // It returns metricNameRaw for the given labels if len(metricNameRaw) == 0.
 func (ctx *InsertCtx) WriteDataPointExt(metricNameRaw []byte, labels []prompbmarshal.Label, timestamp int64, value float64) ([]byte, error) {
-	if storagelimits.ExceedingLabels(labels) {
-		return metricNameRaw, nil
-	}
 	if len(metricNameRaw) == 0 {
 		metricNameRaw = ctx.marshalMetricNameRaw(nil, labels)
 	}
@@ -144,6 +138,11 @@ func (ctx *InsertCtx) ApplyRelabeling() {
 	ctx.Labels = ctx.relabelCtx.ApplyRelabeling(ctx.Labels)
 }
 
+// AreLabelsInvalid returns true if the labels are empty or fail validation.
+func (ctx *InsertCtx) AreLabelsInvalid() bool {
+	return len(ctx.Labels) == 0 || storagelimits.ExceedingLabels(ctx.Labels)
+}
+
 // FlushBufs flushes buffered rows to the underlying storage.
 func (ctx *InsertCtx) FlushBufs() error {
 	sas := sasGlobal.Load()
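For orientation, here is a hypothetical sketch of what a limit check such as storagelimits.ExceedingLabels could look like. The actual implementation and flag wiring are not shown in this diff and may differ; the limit values below are illustrative, not documented defaults.

package storagelimits

import "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"

// Illustrative limits; in reality these would be populated from the
// -maxLabelsPerTimeseries and -maxLabelValueLen command-line flags.
var (
	maxLabelsPerTimeseries = 30
	maxLabelValueLen       = 1024
)

// ExceedingLabels reports whether the labels violate the configured limits,
// in which case the whole series is dropped instead of being truncated.
func ExceedingLabels(labels []prompbmarshal.Label) bool {
	if len(labels) > maxLabelsPerTimeseries {
		return true
	}
	for _, label := range labels {
		if len(label.Value) > maxLabelValueLen {
			return true
		}
	}
	return false
}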


@@ -49,8 +49,8 @@ func insertRows(rows []parser.Row, extraLabels []prompbmarshal.Label) error {
 		if hasRelabeling {
 			ctx.ApplyRelabeling()
 		}
-		if len(ctx.Labels) == 0 {
-			// Skip metric without labels.
+		if ctx.AreLabelsInvalid() {
+			// Skip metric with invalid labels.
 			continue
 		}
 		ctx.SortLabelsIfNeeded()


@@ -63,8 +63,8 @@ func insertRows(sketches []*datadogsketches.Sketch, extraLabels []prompbmarshal.
 		if hasRelabeling {
 			ctx.ApplyRelabeling()
 		}
-		if len(ctx.Labels) == 0 {
-			// Skip metric without labels.
+		if ctx.AreLabelsInvalid() {
+			// Skip metric with invalid labels.
 			continue
 		}
 		ctx.SortLabelsIfNeeded()


@@ -66,8 +66,8 @@ func insertRows(series []datadogv1.Series, extraLabels []prompbmarshal.Label) er
 		if hasRelabeling {
 			ctx.ApplyRelabeling()
 		}
-		if len(ctx.Labels) == 0 {
-			// Skip metric without labels.
+		if ctx.AreLabelsInvalid() {
+			// Skip metric with invalid labels.
 			continue
 		}
 		ctx.SortLabelsIfNeeded()


@@ -69,8 +69,8 @@ func insertRows(series []datadogv2.Series, extraLabels []prompbmarshal.Label) er
 		if hasRelabeling {
 			ctx.ApplyRelabeling()
 		}
-		if len(ctx.Labels) == 0 {
-			// Skip metric without labels.
+		if ctx.AreLabelsInvalid() {
+			// Skip metric with invalid labels.
 			continue
 		}
 		ctx.SortLabelsIfNeeded()


@@ -39,8 +39,8 @@ func insertRows(rows []parser.Row) error {
 		if hasRelabeling {
 			ctx.ApplyRelabeling()
 		}
-		if len(ctx.Labels) == 0 {
-			// Skip metric without labels.
+		if ctx.AreLabelsInvalid() {
+			// Skip metric with invalid labels.
 			continue
 		}
 		ctx.SortLabelsIfNeeded()


@@ -109,8 +109,8 @@ func insertRows(db string, rows []parser.Row, extraLabels []prompbmarshal.Label)
 			ic.Labels = append(ic.Labels[:0], ctx.originLabels...)
 			ic.AddLabel("", metricGroup)
 			ic.ApplyRelabeling()
-			if len(ic.Labels) == 0 {
-				// Skip metric without labels.
+			if ic.AreLabelsInvalid() {
+				// Skip metric with invalid labels.
 				continue
 			}
 			ic.SortLabelsIfNeeded()
@@ -130,8 +130,8 @@ func insertRows(db string, rows []parser.Row, extraLabels []prompbmarshal.Label)
 			metricGroup := bytesutil.ToUnsafeString(ctx.metricGroupBuf)
 			ic.Labels = ic.Labels[:labelsLen]
 			ic.AddLabel("", metricGroup)
-			if len(ic.Labels) == 0 {
-				// Skip metric without labels.
+			if ic.AreLabelsInvalid() {
+				// Skip metric with invalid labels.
 				continue
 			}
 			if err := ic.WriteDataPoint(ctx.metricNameBuf, ic.Labels[len(ic.Labels)-1:], r.Timestamp, f.Value); err != nil {


@@ -58,8 +58,8 @@ func insertRows(block *stream.Block, extraLabels []prompbmarshal.Label) error {
 		if hasRelabeling {
 			ic.ApplyRelabeling()
 		}
-		if len(ic.Labels) == 0 {
-			// Skip metric without labels.
+		if ic.AreLabelsInvalid() {
+			// Skip metric with invalid labels.
 			return nil
 		}
 		ic.SortLabelsIfNeeded()


@@ -61,8 +61,8 @@ func insertRows(rows []newrelic.Row, extraLabels []prompbmarshal.Label) error {
 		if hasRelabeling {
 			ctx.ApplyRelabeling()
 		}
-		if len(ctx.Labels) == 0 {
-			// Skip metric without labels.
+		if ctx.AreLabelsInvalid() {
+			// Skip metric with invalid labels.
 			continue
 		}
 		ctx.SortLabelsIfNeeded()


@@ -62,8 +62,8 @@ func insertRows(tss []prompbmarshal.TimeSeries, extraLabels []prompbmarshal.Labe
 		if hasRelabeling {
 			ctx.ApplyRelabeling()
 		}
-		if len(ctx.Labels) == 0 {
-			// Skip metric without labels.
+		if ctx.AreLabelsInvalid() {
+			// Skip metric with invalid labels.
 			continue
 		}
 		ctx.SortLabelsIfNeeded()


@@ -39,8 +39,8 @@ func insertRows(rows []parser.Row) error {
 		if hasRelabeling {
 			ctx.ApplyRelabeling()
 		}
-		if len(ctx.Labels) == 0 {
-			// Skip metric without labels.
+		if ctx.AreLabelsInvalid() {
+			// Skip metric with invalid labels.
 			continue
 		}
 		ctx.SortLabelsIfNeeded()


@@ -57,8 +57,8 @@ func insertRows(rows []parser.Row, extraLabels []prompbmarshal.Label) error {
 		if hasRelabeling {
 			ctx.ApplyRelabeling()
 		}
-		if len(ctx.Labels) == 0 {
-			// Skip metric without labels.
+		if ctx.AreLabelsInvalid() {
+			// Skip metric with invalid labels.
 			continue
 		}
 		ctx.SortLabelsIfNeeded()


@@ -57,8 +57,8 @@ func insertRows(rows []parser.Row, extraLabels []prompbmarshal.Label) error {
 		if hasRelabeling {
 			ctx.ApplyRelabeling()
 		}
-		if len(ctx.Labels) == 0 {
-			// Skip metric without labels.
+		if ctx.AreLabelsInvalid() {
+			// Skip metric with invalid labels.
 			continue
 		}
 		ctx.SortLabelsIfNeeded()


@@ -58,8 +58,8 @@ func push(ctx *common.InsertCtx, tss []prompbmarshal.TimeSeries) {
 			ctx.AddLabel(label.Name, label.Value)
 		}
 		ctx.ApplyRelabeling()
-		if len(ctx.Labels) == 0 {
-			// Skip metric without labels.
+		if ctx.AreLabelsInvalid() {
+			// Skip metric with invalid labels.
 			continue
 		}
 		ctx.SortLabelsIfNeeded()


@@ -55,8 +55,8 @@ func insertRows(timeseries []prompb.TimeSeries, extraLabels []prompbmarshal.Labe
 		if hasRelabeling {
 			ctx.ApplyRelabeling()
 		}
-		if len(ctx.Labels) == 0 {
-			// Skip metric without labels.
+		if ctx.AreLabelsInvalid() {
+			// Skip metric with invalid labels.
 			continue
 		}
 		ctx.SortLabelsIfNeeded()


@@ -61,8 +61,8 @@ func insertRows(rows []parser.Row, extraLabels []prompbmarshal.Label) error {
 		if hasRelabeling {
 			ic.ApplyRelabeling()
 		}
-		if len(ic.Labels) == 0 {
-			// Skip metric without labels.
+		if ic.AreLabelsInvalid() {
+			// Skip metric with invalid labels.
 			continue
 		}
 		ic.SortLabelsIfNeeded()


@ -1,13 +1,13 @@
{ {
"files": { "files": {
"main.css": "./static/css/main.d781989c.css", "main.css": "./static/css/main.d781989c.css",
"main.js": "./static/js/main.7ec4e6eb.js", "main.js": "./static/js/main.a7037969.js",
"static/js/685.f772060c.chunk.js": "./static/js/685.f772060c.chunk.js", "static/js/685.f772060c.chunk.js": "./static/js/685.f772060c.chunk.js",
"static/media/MetricsQL.md": "./static/media/MetricsQL.a00044c91d9781cf8557.md", "static/media/MetricsQL.md": "./static/media/MetricsQL.a00044c91d9781cf8557.md",
"index.html": "./index.html" "index.html": "./index.html"
}, },
"entrypoints": [ "entrypoints": [
"static/css/main.d781989c.css", "static/css/main.d781989c.css",
"static/js/main.7ec4e6eb.js" "static/js/main.a7037969.js"
] ]
} }


@@ -1 +1 @@
-<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore and troubleshoot your VictoriaMetrics data"/><link rel="manifest" href="./manifest.json"/><title>vmui</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:site" content="@https://victoriametrics.com/"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaMetrics"><meta property="og:url" content="https://victoriametrics.com/"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><script defer="defer" src="./static/js/main.7ec4e6eb.js"></script><link href="./static/css/main.d781989c.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
+<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore and troubleshoot your VictoriaMetrics data"/><link rel="manifest" href="./manifest.json"/><title>vmui</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:site" content="@https://victoriametrics.com/"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaMetrics"><meta property="og:url" content="https://victoriametrics.com/"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><script defer="defer" src="./static/js/main.a7037969.js"></script><link href="./static/css/main.d781989c.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>


@@ -18,18 +18,26 @@ See also [LTS releases](https://docs.victoriametrics.com/lts-releases/).
 
 ## tip
 
-* SECURITY: upgrade Go builder from Go1.23.1 to Go1.23.3. See the list of issues addressed in [Go1.23.2](https://github.com/golang/go/issues?q=milestone%3AGo1.23.2+label%3ACherryPickApproved) and [Go1.23.3](https://github.com/golang/go/issues?q=milestone%3AGo1.23.3+label%3ACherryPickApproved).
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent) added `-maxLabelsPerTimeseries` and `-maxLabelValueLen` flags, which limit the number of labels and the label value length for data pushed to or scraped by vmagent. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6928).
-* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth/): fixed unauthorized routing behavior inconsistency. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7543) for details.
+* BUGFIX: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/) changed the meaning of `-maxLabelsPerTimeseries` and `-maxLabelValueLen`. Previously, excessive labels, label names and values were truncated. To prevent storing corrupted data, series hitting these limits are now dropped. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6928).
+
+## [v1.106.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.106.1)
+
+Released at 2024-11-15
+
+* SECURITY: upgrade Go builder from Go1.23.1 to Go1.23.3. See the list of issues addressed in [Go1.23.2](https://github.com/golang/go/issues?q=milestone%3AGo1.23.2+label%3ACherryPickApproved) and [Go1.23.3](https://github.com/golang/go/issues?q=milestone%3AGo1.23.3+label%3ACherryPickApproved).
+
 * BUGFIX: [vmctl](https://docs.victoriametrics.com/vmctl/): drop rows that do not belong to the current series during import. The dropped rows should belong to another series whose tags are a superset of the current series. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7301) and [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7330). Thanks to @dpedu for reporting and cooperating with the test.
 * BUGFIX: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/), `vmselect` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/): keep the order of resulting time series when `limit_offset` is applied. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7068).
 * BUGFIX: [graphite](https://docs.victoriametrics.com/#graphite-render-api-usage): properly handle xFilesFactor=0 for `transformRemoveEmptySeries` function. See [this PR](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7337) for details.
-* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth/): properly check availability of all the backends before giving up when proxying requests. Previously, vmauth could return an error even if there were healthy backends available. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3061) for details.
+* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth/): fixed unauthorized routing behavior inconsistency. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7543) for details.
 * BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth/): properly inherit [`drop_src_path_prefix_parts`](https://docs.victoriametrics.com/vmauth/#dropping-request-path-prefix), [`load_balancing_policy`](https://docs.victoriametrics.com/vmauth/#high-availability), [`retry_status_codes`](https://docs.victoriametrics.com/vmauth/#load-balancing) and [`discover_backend_ips`](https://docs.victoriametrics.com/vmauth/#discovering-backend-ips) options by `url_map` entries if `url_prefix` option isn't set at the [user config level](https://docs.victoriametrics.com/vmauth/#auth-config). These options were inherited only when the `url_prefix` option was set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7519).
+* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth): properly check availability of all the backends before giving up when proxying requests. Previously, vmauth could return an error even if there were healthy backends available. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3061) for details.
 * BUGFIX: [dashboards](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/dashboards): add `file` label filter to vmalert dashboard panels. Previously, metrics from groups with the same name but different rule files could be mixed in the results.
-* BUGFIX: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/) changed the meaning of `-maxLabelsPerTimeseries` and `-maxLabelValueLen`. Previously, excessive labels, label names and values were truncated. To prevent storing corrupted data, series hitting these limits are now dropped. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6928).
+* BUGFIX: [Single-node VictoriaMetrics](https://docs.victoriametrics.com/) and `vmstorage` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/): optimize resource usage for configured [downsampling](https://docs.victoriametrics.com/#downsampling) with time-series filter. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7440) for details.
+* BUGFIX: `vmstorage` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/): properly return query results for search requests after index rotation. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7417) for details.
+* BUGFIX: `vmselect` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/): properly handle [multitenant](https://docs.victoriametrics.com/cluster-victoriametrics/#multitenancy-via-labels) query request errors and correctly perform search for available tenants. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7549) for details.
 
 ## [v1.106.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.106.0)
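For context, both new limits are plain command-line options. A hypothetical vmagent invocation might look like the following; the flag values are illustrative, not documented defaults:

./vmagent -remoteWrite.url=http://victoria-metrics:8428/api/v1/write \
	-maxLabelsPerTimeseries=30 \
	-maxLabelValueLen=1024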


@@ -91,13 +91,17 @@ type indexDB struct {
 	// The db must be automatically recovered after that.
 	missingMetricNamesForMetricID atomic.Uint64
 
-	// minMissingTimestamp is the minimum timestamp, which is missing in the given indexDB.
+	// minMissingTimestampByKey holds, per index search key, the minimum timestamp
+	// that is missing in the given indexDB.
+	// The key must be formed with the marshalCommonPrefix function.
 	//
 	// This field is used at containsTimeRange() function only for the previous indexDB,
 	// since this indexDB is readonly.
 	// This field cannot be used for the current indexDB, since it may receive data
 	// with bigger timestamps at any time.
-	minMissingTimestamp atomic.Int64
+	minMissingTimestampByKey map[string]int64
+	// minMissingTimestampByKeyLock protects minMissingTimestampByKey
+	minMissingTimestampByKeyLock sync.RWMutex
 
 	// generation identifies the index generation ID
 	// and is used for syncing items from different indexDBs
@@ -162,6 +166,7 @@ func mustOpenIndexDB(path string, s *Storage, isReadOnly *atomic.Bool) *indexDB
 		tb:   tb,
 		name: name,
 
+		minMissingTimestampByKey: make(map[string]int64),
 		tagFiltersToMetricIDsCache: workingsetcache.New(tagFiltersCacheSize),
 		s:                          s,
 		loopsPerDateTagFilterCache: workingsetcache.New(mem / 128),
@@ -1945,25 +1950,36 @@ func (is *indexSearch) containsTimeRange(tr TimeRange) bool {
 		// This means that it may contain data for the given tr with probability close to 100%.
 		return true
 	}
 
 	// The db corresponds to the previous indexDB, which is readonly.
 	// So it is safe caching the minimum timestamp, which isn't covered by the db.
-	minMissingTimestamp := db.minMissingTimestamp.Load()
-	if minMissingTimestamp != 0 && tr.MinTimestamp >= minMissingTimestamp {
+
+	// Use the common prefix as the key for minMissingTimestamp.
+	// This is needed to properly track timestamps for the cluster version,
+	// which uses tenant labels for the index search.
+	kb := &is.kb
+	kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixDateToMetricID)
+	key := kb.B
+
+	db.minMissingTimestampByKeyLock.RLock()
+	minMissingTimestamp, ok := db.minMissingTimestampByKey[string(key)]
+	db.minMissingTimestampByKeyLock.RUnlock()
+
+	if ok && tr.MinTimestamp >= minMissingTimestamp {
 		return false
 	}
-	if is.containsTimeRangeSlow(tr) {
+	if is.containsTimeRangeSlowForPrefixBuf(kb, tr) {
 		return true
 	}
-	db.minMissingTimestamp.CompareAndSwap(minMissingTimestamp, tr.MinTimestamp)
+
+	db.minMissingTimestampByKeyLock.Lock()
+	db.minMissingTimestampByKey[string(key)] = tr.MinTimestamp
+	db.minMissingTimestampByKeyLock.Unlock()
+
 	return false
 }
 
-func (is *indexSearch) containsTimeRangeSlow(tr TimeRange) bool {
+func (is *indexSearch) containsTimeRangeSlowForPrefixBuf(prefixBuf *bytesutil.ByteBuffer, tr TimeRange) bool {
 	ts := &is.ts
-	kb := &is.kb
 
 	// Verify whether the tr.MinTimestamp is included into `ts` or is smaller than the minimum date stored in `ts`.
 	// Do not check whether tr.MaxTimestamp is included into `ts` or is bigger than the max date stored in `ts` for performance reasons.
@@ -1972,13 +1988,12 @@ func (is *indexSearch) containsTimeRangeSlow(tr TimeRange) bool {
 	// The main practical case allows skipping searching in prev indexdb (`ts`) when `tr`
 	// is located above the max date stored there.
 	minDate := uint64(tr.MinTimestamp) / msecPerDay
-	kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixDateToMetricID)
-	prefix := kb.B
-	kb.B = encoding.MarshalUint64(kb.B, minDate)
-	ts.Seek(kb.B)
+	prefix := prefixBuf.B
+	prefixBuf.B = encoding.MarshalUint64(prefixBuf.B, minDate)
+	ts.Seek(prefixBuf.B)
 	if !ts.NextItem() {
 		if err := ts.Error(); err != nil {
-			logger.Panicf("FATAL: error when searching for minDate=%d, prefix %q: %w", minDate, kb.B, err)
+			logger.Panicf("FATAL: error when searching for minDate=%d, prefix %q: %w", minDate, prefixBuf.B, err)
 		}
 		return false
 	}
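The change above turns a single atomic timestamp into a small negative-result cache keyed by the marshaled common prefix, guarded by a sync.RWMutex since lookups dominate updates. A minimal standalone sketch of that pattern, with illustrative names (the real code keys the map by the common index prefix, which encodes the tenant in the cluster version):

package main

import (
	"fmt"
	"sync"
)

// negativeCache remembers, per key, the minimum timestamp known to be missing.
type negativeCache struct {
	mu                       sync.RWMutex
	minMissingTimestampByKey map[string]int64
}

func newNegativeCache() *negativeCache {
	return &negativeCache{minMissingTimestampByKey: make(map[string]int64)}
}

// knownMiss reports whether a search at or above minTimestamp under key
// is already known to find nothing, so the slow path can be skipped.
func (c *negativeCache) knownMiss(key []byte, minTimestamp int64) bool {
	c.mu.RLock()
	cached, ok := c.minMissingTimestampByKey[string(key)]
	c.mu.RUnlock()
	return ok && minTimestamp >= cached
}

// recordMiss remembers that nothing at or above minTimestamp exists under key.
func (c *negativeCache) recordMiss(key []byte, minTimestamp int64) {
	c.mu.Lock()
	c.minMissingTimestampByKey[string(key)] = minTimestamp
	c.mu.Unlock()
}

func main() {
	c := newNegativeCache()
	key := []byte("tenant:0:0")
	fmt.Println(c.knownMiss(key, 100)) // false: nothing cached yet
	c.recordMiss(key, 100)
	fmt.Println(c.knownMiss(key, 150)) // true: 150 >= cached 100
}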


@@ -2101,3 +2101,130 @@ func stopTestStorage(s *Storage) {
 	s.tsidCache.Stop()
 	fs.MustRemoveDirAtomic(s.cachePath)
 }
+
+func TestSearchContainsTimeRange(t *testing.T) {
+	path := t.Name()
+	os.RemoveAll(path)
+	s := MustOpenStorage(path, retentionMax, 0, 0)
+	db := s.idb()
+	is := db.getIndexSearch(noDeadline)
+
+	// Create a bunch of per-day time series
+	const (
+		days                = 6
+		tenant2IngestionDay = 8
+		metricsPerDay       = 1000
+	)
+	rotationDay := time.Date(2019, time.October, 15, 5, 1, 0, 0, time.UTC)
+	rotationMillis := uint64(rotationDay.UnixMilli())
+	rotationDate := rotationMillis / msecPerDay
+	var metricNameBuf []byte
+	perDayMetricIDs := make(map[uint64]*uint64set.Set)
+	labelNames := []string{
+		"__name__", "constant", "day", "UniqueId", "some_unique_id",
+	}
+	sort.Strings(labelNames)
+
+	newMN := func(name string, day, metric int) MetricName {
+		var mn MetricName
+		mn.MetricGroup = []byte(name)
+		mn.AddTag(
+			"constant",
+			"const",
+		)
+		mn.AddTag(
+			"day",
+			fmt.Sprintf("%v", day),
+		)
+		mn.AddTag(
+			"UniqueId",
+			fmt.Sprintf("%v", metric),
+		)
+		mn.AddTag(
+			"some_unique_id",
+			fmt.Sprintf("%v", day),
+		)
+		mn.sortTags()
+		return mn
+	}
+
+	// ingest metrics for tenant 0:0
+	for day := 0; day < days; day++ {
+		date := rotationDate - uint64(day)
+		var metricIDs uint64set.Set
+		for metric := range metricsPerDay {
+			mn := newMN("testMetric", day, metric)
+			metricNameBuf = mn.Marshal(metricNameBuf[:0])
+			var genTSID generationTSID
+			if !is.getTSIDByMetricName(&genTSID, metricNameBuf, date) {
+				generateTSID(&genTSID.TSID, &mn)
+				createAllIndexesForMetricName(is, &mn, &genTSID.TSID, date)
+			}
+			metricIDs.Add(genTSID.TSID.MetricID)
+		}
+		perDayMetricIDs[date] = &metricIDs
+	}
+	db.putIndexSearch(is)
+
+	// Flush index to disk, so it becomes visible for search
+	s.DebugFlush()
+
+	is2 := db.getIndexSearch(noDeadline)
+
+	// Check that all the metrics are found for all the days.
+	for date := rotationDate - days + 1; date <= rotationDate; date++ {
+		metricIDs, err := is2.getMetricIDsForDate(date, metricsPerDay)
+		if err != nil {
+			t.Fatalf("unexpected error: %s", err)
+		}
+		if !perDayMetricIDs[date].Equal(metricIDs) {
+			t.Fatalf("unexpected metricIDs found;\ngot\n%d\nwant\n%d", metricIDs.AppendTo(nil), perDayMetricIDs[date].AppendTo(nil))
+		}
+	}
+	db.putIndexSearch(is2)
+
+	// rotate indexdb
+	s.mustRotateIndexDB(rotationDay)
+	db = s.idb()
+
+	// perform search for 0:0 tenant;
+	// results of previous search requests shouldn't affect it
+	isExt := db.extDB.getIndexSearch(noDeadline)
+	// search for a range that is covered by the prev indexDB for dates before ingestion
+	tr := TimeRange{
+		MinTimestamp: int64(rotationMillis - msecPerDay*(days)),
+		MaxTimestamp: int64(rotationMillis),
+	}
+	if !isExt.containsTimeRange(tr) {
+		t.Fatalf("expected to have given time range at prev IndexDB")
+	}
+
+	// search for a range that does not exist in the prev indexDB
+	tr = TimeRange{
+		MinTimestamp: int64(rotationMillis + msecPerDay*(days+4)),
+		MaxTimestamp: int64(rotationMillis + msecPerDay*(days+2)),
+	}
+	if isExt.containsTimeRange(tr) {
+		t.Fatalf("not expected to have given time range at prev IndexDB")
+	}
+	key := isExt.marshalCommonPrefix(nil, nsPrefixDateToMetricID)
+
+	db.extDB.minMissingTimestampByKeyLock.Lock()
+	minMissingTimestamp := db.extDB.minMissingTimestampByKey[string(key)]
+	db.extDB.minMissingTimestampByKeyLock.Unlock()
+
+	if minMissingTimestamp != tr.MinTimestamp {
+		t.Fatalf("unexpected minMissingTimestamp for 0:0 tenant got %d, want %d", minMissingTimestamp, tr.MinTimestamp)
+	}
+	db.extDB.putIndexSearch(isExt)
+
+	s.MustClose()
+	fs.MustRemoveAll(path)
+}
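Assuming the test lives in the storage package of the repository (the file path is not shown in this diff, so the package path below is an assumption), it can be run in isolation with the standard Go tooling:

go test -run TestSearchContainsTimeRange ./lib/storage/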