lib/promrelabel: fix relabeling if clause (#4816)

* lib/promrelabel: fix relabeling `if` clause being applied to labels outside of the current context

Relabeling is applied to each metric row separately, but in order to reduce the number of memory allocations the labels slice is reused across rows.

Functions that operate on the current metric row's labels are supposed to access only those labels via the provided offset, but the `if` clause matcher was matching against the whole labels set instead of the current row's labels.

This led to invalid relabeling results such as the one described here: https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4806
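
For illustration, here is a minimal standalone sketch of the failure mode (not the actual VictoriaMetrics code; the `Label` struct is a simplified stand-in for `prompbmarshal.Label`):

package main

import "fmt"

// Label is a simplified stand-in for prompbmarshal.Label.
type Label struct {
    Name  string
    Value string
}

func main() {
    // One backing slice is reused across metric rows to reduce allocations;
    // the row currently being relabeled starts at labelsOffset.
    labels := []Label{
        {Name: "__name__", Value: "smth"},    // row 0, already processed
        {Name: "__name__", Value: "notthis"}, // row 1, being processed now
    }
    labelsOffset := 1

    // Buggy behavior: the `if` matcher received the whole slice,
    // so row 0's labels leaked into row 1's match context.
    fmt.Println(labels) // [{__name__ smth} {__name__ notthis}]

    // Fixed behavior: slice from the offset so only the current row is visible.
    src := labels[labelsOffset:]
    fmt.Println(src) // [{__name__ notthis}]
}

With the whole slice visible, an expression such as `if: '{__name__!~"smth"}'` evaluated against row 1 could be influenced by row 0's `__name__` label, producing order-dependent results.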

Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>

* docs/CHANGELOG.md: document the bugfix

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1998
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4806

---------

Signed-off-by: Zakhar Bessarab <z.bessarab@victoriametrics.com>
Co-authored-by: Aliaksandr Valialkin <valyala@victoriametrics.com>
Zakhar Bessarab 2023-08-11 17:37:48 +04:00 committed by GitHub
parent e0017b4d47
commit 1bd7637fe1
3 changed files with 48 additions and 1 deletion

docs/CHANGELOG.md

@@ -42,6 +42,7 @@ The following `tip` changes can be tested by building VictoriaMetrics components
 * FEATURE: [Official Grafana dashboards for VictoriaMetrics](https://grafana.com/orgs/victoriametrics): correctly calculate `Bytes per point` value for single-server and cluster VM dashboards. Previously the calculation mistakenly included the number of entries in indexdb in the denominator, which could show lower values than expected.
 * FEATURE: [Alerting rules for VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker#alerts): `ConcurrentFlushesHitTheLimit` alerting rule was moved from [single-server](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/alerts.yml) and [cluster](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/alerts-cluster.yml) alerts to the [list of "health" alerts](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/alerts-health.yml) as it could be related to many VictoriaMetrics components.
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly apply `if` filters during [relabeling](https://docs.victoriametrics.com/vmagent.html#relabeling-enhancements). Previously the `if` filter could be applied incorrectly. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4806) and [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4816).
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): use local scrape timestamps for the scraped metrics unless `honor_timestamps: true` option is explicitly set at [scrape_config](https://docs.victoriametrics.com/sd_configs.html#scrape_configs). This fixes gaps for metrics collected from [cadvisor](https://github.com/google/cadvisor) or similar exporters, which export metrics with invalid timestamps. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4697) and [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4697#issuecomment-1654614799) for details. The issue has been introduced in [v1.68.0](#v1680).
 * BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): properly set `vmalert_config_last_reload_successful` value on configuration updates or rollbacks. The bug was introduced in [v1.92.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.92.0) in [this PR](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4543).
 * BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): fix `vmalert_remotewrite_send_duration_seconds_total` value; previously it didn't account for the real time spent on remote write requests. See [this PR](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4801) for details.

lib/promrelabel/relabel.go

@@ -158,7 +158,7 @@ func FinalizeLabels(dst, src []prompbmarshal.Label) []prompbmarshal.Label {
 // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
 func (prc *parsedRelabelConfig) apply(labels []prompbmarshal.Label, labelsOffset int) []prompbmarshal.Label {
     src := labels[labelsOffset:]
-    if !prc.If.Match(labels) {
+    if !prc.If.Match(src) {
         if prc.Action == "keep" {
             // Drop the target on `if` mismatch for `action: keep`
             return labels[:labelsOffset]
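
Callers accumulate rows in one shared slice and pass the offset of the current row on each call. Below is a sketch of that end-to-end pattern, assuming the repository's import paths (github.com/VictoriaMetrics/VictoriaMetrics/lib/...) and using only the ParseRelabelConfigsData/Apply API that appears in the test further down:

package main

import (
    "fmt"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

func main() {
    // Drop every series whose name does not match "smth".
    pcs, err := promrelabel.ParseRelabelConfigsData([]byte(`
- action: drop
  if: '{__name__!~"smth"}'
`))
    if err != nil {
        panic(err)
    }

    // Rows share one backing slice; labelsOffset marks where the current row starts.
    var labels []prompbmarshal.Label
    labelsOffset := 0
    for _, metric := range []string{`smth`, `notthis`} {
        labels = append(labels, promutils.MustNewLabelsFromString(metric).GetLabels()...)
        labels = pcs.Apply(labels, labelsOffset)
        labelsOffset = len(labels)
    }
    fmt.Println(len(labels)) // 1: only the `smth` series survives
}

Before the fix, the second Apply call matched the `if` expression against both rows at once, so the surviving set could depend on the order in which the series were processed.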

lib/promrelabel/relabel_test.go

@@ -915,3 +915,49 @@ func newTestRegexRelabelConfig(pattern string) *parsedRelabelConfig {
     }
     return prc
 }
+
+func TestParsedRelabelConfigsApplyForMultipleSeries(t *testing.T) {
+    f := func(config string, metrics []string, resultExpected []string) {
+        t.Helper()
+        pcs, err := ParseRelabelConfigsData([]byte(config))
+        if err != nil {
+            t.Fatalf("cannot parse %q: %s", config, err)
+        }
+
+        totalLabels := 0
+        var labels []prompbmarshal.Label
+        for _, metric := range metrics {
+            labels = append(labels, promutils.MustNewLabelsFromString(metric).GetLabels()...)
+            resultLabels := pcs.Apply(labels, totalLabels)
+            SortLabels(resultLabels)
+            totalLabels += len(resultLabels)
+            labels = resultLabels
+        }
+
+        var result []string
+        for i := range labels {
+            result = append(result, LabelsToString(labels[i:i+1]))
+        }
+
+        if len(result) != len(resultExpected) {
+            t.Fatalf("unexpected number of results; got\n%q\nwant\n%q", result, resultExpected)
+        }
+        for i := range result {
+            if result[i] != resultExpected[i] {
+                t.Fatalf("unexpected result[%d]; got\n%q\nwant\n%q", i, result[i], resultExpected[i])
+            }
+        }
+    }
+
+    t.Run("drops one of series", func(t *testing.T) {
+        f(`
+- action: drop
+  if: '{__name__!~"smth"}'
+`, []string{`smth`, `notthis`}, []string{`smth`})
+        f(`
+- action: drop
+  if: '{__name__!~"smth"}'
+`, []string{`notthis`, `smth`}, []string{`smth`})
+    })
+}
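
Note the test's design: it runs the same config against both series orders (`smth`, `notthis` and the reverse) and expects the identical surviving series, which catches any `if` matcher that can still see labels left over from a previously processed row.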