lib/promscrape: respect 0 value for series_limit param (#5663)

* lib/promscrape: respect `0` value for `series_limit` param

Respect the `0` value for the `series_limit` param in `scrape_config`
even if the global limit is set via `-promscrape.seriesLimitPerTarget`.
Previously, a `0` value was ignored in favor of `-promscrape.seriesLimitPerTarget`.
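
For illustration, a minimal `scrape_configs` sketch (job names, targets and the flag value are hypothetical) where one job inherits the global limit and another removes it explicitly:

```yaml
# vmagent is assumed to be started with -promscrape.seriesLimitPerTarget=1000 (hypothetical value)
scrape_configs:
  # no series_limit here, so the global limit of 1000 series per target applies
  - job_name: limited
    static_configs:
      - targets: ["host1:8428"]
  # an explicit 0 is now respected and removes the limit for this job only
  - job_name: unlimited
    series_limit: 0
    static_configs:
      - targets: ["host2:8428"]
```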

This behavior aligns with the ability to override the `series_limit` value via
relabeling with the `__series_limit__` label.
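
A hypothetical relabeling sketch achieving the same per-target override via the `__series_limit__` label:

```yaml
scrape_configs:
  - job_name: unlimited-via-relabeling
    static_configs:
      - targets: ["host3:8428"]
    relabel_configs:
      # setting __series_limit__ during target relabeling overrides the series limit for this target
      - target_label: __series_limit__
        replacement: "0"
```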

Signed-off-by: hagen1778 <roman@victoriametrics.com>

* Update docs/CHANGELOG.md

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: Aliaksandr Valialkin <valyala@victoriametrics.com>
Roman Khavronenko 2024-01-23 12:09:14 +01:00 committed by Aliaksandr Valialkin
parent 48a851d7d1
commit ca67926952
3 changed files with 27 additions and 3 deletions

docs/CHANGELOG.md

@@ -13,6 +13,7 @@ The following `tip` changes can be tested by building VictoriaMetrics components
 * BUGFIX: properly return errors from [export APIs](https://docs.victoriametrics.com/#how-to-export-time-series). Previously these errors were silently suppressed. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5649).
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly discover targets for `role: endpoints` and `role: endpointslice` in [kubernetes_sd_configs](https://docs.victoriametrics.com/sd_configs.html#kubernetes_sd_configs). Previously some `endpoints` and `endpointslice` targets could be left undiscovered or some targets could have missing `__meta_*` labels when performing service discovery in busy Kubernetes clusters with large number of pods. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5557).
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): respect explicitly set `series_limit: 0` in [scrape_config](https://docs.victoriametrics.com/sd_configs.html#scrape_configs). This allows removing [`series_limit` restriction](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter) on a per-`scrape_config` basis when global limit is set via `-promscrape.seriesLimitPerTarget`. Previously, `0` value was ignored in favor of `-promscrape.seriesLimitPerTarget`.
 * BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly process queries with too big lookbehind window such as `foo[100y]`. Previously, such queries could return empty responses even if `foo` is present in database. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5553).
 * BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly handle possible negative results caused by float operations precision error in rollup functions like rate() or increase(). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5571).

lib/promscrape/config.go

@@ -283,7 +283,7 @@ type ScrapeConfig struct {
 	StreamParse bool `yaml:"stream_parse,omitempty"`
 	ScrapeAlignInterval *promutils.Duration `yaml:"scrape_align_interval,omitempty"`
 	ScrapeOffset *promutils.Duration `yaml:"scrape_offset,omitempty"`
-	SeriesLimit int `yaml:"series_limit,omitempty"`
+	SeriesLimit *int `yaml:"series_limit,omitempty"`
 	NoStaleMarkers *bool `yaml:"no_stale_markers,omitempty"`
 	ProxyClientConfig promauth.ProxyClientConfig `yaml:",inline"`
@@ -1028,8 +1028,8 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
 		noStaleTracking = *sc.NoStaleMarkers
 	}
 	seriesLimit := *seriesLimitPerTarget
-	if sc.SeriesLimit > 0 {
-		seriesLimit = sc.SeriesLimit
+	if sc.SeriesLimit != nil {
+		seriesLimit = *sc.SeriesLimit
 	}
 	swc := &scrapeWorkConfig{
 		scrapeInterval: scrapeInterval,

lib/promscrape/config_test.go

@@ -1289,6 +1289,29 @@ scrape_configs:
 			jobNameOriginal: "foo",
 		},
 	})
+	defaultSeriesLimitPerTarget := *seriesLimitPerTarget
+	*seriesLimitPerTarget = 1e3
+	f(`
+scrape_configs:
+- job_name: foo
+  series_limit: 0
+  static_configs:
+  - targets: ["foo.bar:1234"]
+`, []*ScrapeWork{
+		{
+			ScrapeURL: "http://foo.bar:1234/metrics",
+			ScrapeInterval: defaultScrapeInterval,
+			ScrapeTimeout: defaultScrapeTimeout,
+			jobNameOriginal: "foo",
+			Labels: promutils.NewLabelsFromMap(map[string]string{
+				"instance": "foo.bar:1234",
+				"job": "foo",
+			}),
+			SeriesLimit: 0,
+		},
+	})
+	*seriesLimitPerTarget = defaultSeriesLimitPerTarget
 }
 
 func equalStaticConfigForScrapeWorks(a, b []*ScrapeWork) bool {