Revert "lib/promscrape: do not store last scrape response when stale markers … (#5577)"
This reverts commit cfec258803.
Reason for revert: the original code already doesn't store the last scrape response when stale markers are disabled.
The scrapeWork.areIdenticalSeries() function always returns true if stale markers are disabled.
This prevents storing the last response at scrapeWork.processScrapedData().
It also looks like the reverted commit could reintroduce the issue https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3660
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5577
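For context, a minimal self-contained sketch of the behavior described above. The type and field names, the plain string comparison, and the series-limit condition (my reading of why issue 3660 is mentioned) are simplified stand-ins, not the actual lib/promscrape code:

```go
package main

import "fmt"

type scrapeConfig struct {
	NoStaleMarkers bool
	SeriesLimit    int
}

type scrapeWork struct {
	Config     scrapeConfig
	lastScrape string // last stored scrape response body
}

// areIdenticalSeries reports "identical" unconditionally when stale markers are
// disabled (and no series limit is configured), so callers never need to keep
// the previous response around for comparison.
func (sw *scrapeWork) areIdenticalSeries(prevData, currData string) bool {
	if sw.Config.NoStaleMarkers && sw.Config.SeriesLimit <= 0 {
		return true
	}
	return prevData == currData // stand-in for the real fast series comparison
}

// processScrapedData stores the response body only when the series set changed,
// which never happens while stale markers are disabled.
func (sw *scrapeWork) processScrapedData(body string) {
	if !sw.areIdenticalSeries(sw.lastScrape, body) {
		sw.lastScrape = body
	}
}

func main() {
	sw := &scrapeWork{Config: scrapeConfig{NoStaleMarkers: true}}
	sw.processScrapedData("metric_a 1\nmetric_b 2\n")
	fmt.Printf("stored response: %q\n", sw.lastScrape) // "" - nothing is retained
}
```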
parent: 9c0863babc
commit: d3ee3e0ef5

2 changed files with 12 additions and 14 deletions
docs/CHANGELOG.md

@@ -68,7 +68,6 @@ The sandbox cluster installation is running under the constant load generated by
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly assume role with [AWS IRSA authorization](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html). Previously role chaining was not supported. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3822) for details.
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): exit if there is config syntax error in [`scrape_config_files`](https://docs.victoriametrics.com/vmagent.html#loading-scrape-configs-from-multiple-files) when `-promscrape.config.strictParse=true`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5508).
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly discover targets for `role: endpoints` and `role: endpointslice` in [kubernetes_sd_configs](https://docs.victoriametrics.com/sd_configs.html#kubernetes_sd_configs). Previously some `endpoints` and `endpointslice` targets could be left undiscovered or some targets could have missing `__meta_*` labels when performing service discovery in busy Kubernetes clusters with large number of pods. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5557).
-* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): do not store scrape response for target in memory when staleness markers are disabled. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5577) for details.
 * BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix a link for the statistic inaccuracy explanation in the cardinality explorer tool. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5460).
 * BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): send `step` param for instant queries. The change reverts [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3896) due to reasons explained in [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3896#issuecomment-1896704401).
 * BUGFIX: all: fix potential panic during components shutdown when [metrics push](https://docs.victoriametrics.com/#push-metrics) is configured. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5548). Thanks to @zhdd99 for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5549).
lib/promscrape/scrapework.go

@@ -337,13 +337,11 @@ func (sw *scrapeWork) run(stopCh <-chan struct{}, globalStopCh <-chan struct{})
 		// Do not send staleness markers on graceful shutdown as Prometheus does.
 		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2013#issuecomment-1006994079
 	default:
-		if !sw.Config.NoStaleMarkers {
-			// Send staleness markers to all the metrics scraped last time from the target
-			// when the given target disappears as Prometheus does.
-			// Use the current real timestamp for staleness markers, so queries
-			// stop returning data just after the time the target disappears.
-			sw.sendStaleSeries(lastScrape, "", t, true)
-		}
+		// Send staleness markers to all the metrics scraped last time from the target
+		// when the given target disappears as Prometheus does.
+		// Use the current real timestamp for staleness markers, so queries
+		// stop returning data just after the time the target disappears.
+		sw.sendStaleSeries(lastScrape, "", t, true)
 	}
 	if sw.seriesLimiter != nil {
 		sw.seriesLimiter.MustStop()
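For background on the staleness markers referenced in this hunk: a staleness marker is a sample whose value is a NaN with a fixed bit pattern (the same convention Prometheus uses), so that ordinary NaN values are not mistaken for it. A small standalone sketch, independent of the VictoriaMetrics code:

```go
package main

import (
	"fmt"
	"math"
)

// staleNaNBits is the bit pattern Prometheus uses for staleness markers.
// Only this exact NaN counts as a stale marker; other NaNs are ordinary values.
const staleNaNBits uint64 = 0x7ff0000000000002

func staleNaN() float64 { return math.Float64frombits(staleNaNBits) }

func isStaleNaN(v float64) bool { return math.Float64bits(v) == staleNaNBits }

func main() {
	v := staleNaN()
	fmt.Println(math.IsNaN(v))          // true
	fmt.Println(isStaleNaN(v))          // true
	fmt.Println(isStaleNaN(math.NaN())) // false: a regular NaN is not a stale marker
}
```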
@@ -489,8 +487,7 @@ func (sw *scrapeWork) processScrapedData(scrapeTimestamp, realTimestamp int64, b
 		bodyString = ""
 	}
 	seriesAdded := 0
-	// cannot track the number of new time series when stale markers are disabled.
-	if !sw.Config.NoStaleMarkers && !areIdenticalSeries {
+	if !areIdenticalSeries {
 		// The returned value for seriesAdded may be bigger than the real number of added series
 		// if some series were removed during relabeling.
 		// This is a trade-off between performance and accuracy.
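The trade-off mentioned in the comment above can be shown with a toy example: the added-series estimate is taken from the raw scraped rows before relabeling runs, so rows that relabeling later drops still inflate the count. The metric names and the `debug_` drop rule are invented for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	prevRows := []string{"up 1"}
	currRows := []string{"up 1", "debug_metric 7", "new_metric 3"}

	prevSet := map[string]bool{}
	for _, r := range prevRows {
		prevSet[r] = true
	}

	// Estimate used for seriesAdded: rows seen now that were absent previously,
	// computed before any relabeling.
	seriesAdded := 0
	for _, r := range currRows {
		if !prevSet[r] {
			seriesAdded++
		}
	}

	// Relabeling then drops debug_* rows, so only one series is really added.
	reallyAdded := 0
	for _, r := range currRows {
		if !prevSet[r] && !strings.HasPrefix(r, "debug_") {
			reallyAdded++
		}
	}

	fmt.Println(seriesAdded, reallyAdded) // 2 1
}
```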
@@ -520,7 +517,7 @@ func (sw *scrapeWork) processScrapedData(scrapeTimestamp, realTimestamp int64, b
 		writeRequestCtxPool.Put(wc)
 	}
 	// body must be released only after wc is released, since wc refers to body.
-	if !sw.Config.NoStaleMarkers && !areIdenticalSeries {
+	if !areIdenticalSeries {
 		// Send stale markers for disappeared metrics with the real scrape timestamp
 		// in order to guarantee that query doesn't return data after this time for the disappeared metrics.
 		sw.sendStaleSeries(lastScrape, bodyString, realTimestamp, false)
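The disappeared-metrics logic here hands the previous and current scrape bodies to sendStaleSeries, which (per the last hunk below) diffs them via parser.GetRowsDiff. Assuming that function returns the rows of its first argument whose series are missing from the second, the result is exactly the set of series that vanished and should receive stale markers. A simplified stand-in, not the actual parser code:

```go
package main

import (
	"fmt"
	"strings"
)

// seriesKey extracts a crude series identity (metric name plus labels) from an
// exposition row by dropping the trailing value; real parsing is more involved.
func seriesKey(row string) string {
	if i := strings.LastIndexByte(row, ' '); i >= 0 {
		return row[:i]
	}
	return row
}

// disappearedRows returns the rows of prev whose series no longer appear in
// curr - a simplified stand-in for the assumed semantics of
// parser.GetRowsDiff(lastScrape, currScrape).
func disappearedRows(prev, curr string) []string {
	inCurr := map[string]bool{}
	for _, r := range strings.Split(curr, "\n") {
		if r != "" {
			inCurr[seriesKey(r)] = true
		}
	}
	var gone []string
	for _, r := range strings.Split(prev, "\n") {
		if r != "" && !inCurr[seriesKey(r)] {
			gone = append(gone, r)
		}
	}
	return gone
}

func main() {
	lastScrape := "up 1\nprocess_cpu_seconds_total 12\n"
	currScrape := "up 1\n"
	// process_cpu_seconds_total vanished, so it is the one to mark stale.
	fmt.Println(disappearedRows(lastScrape, currScrape)) // [process_cpu_seconds_total 12]
}
```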
@@ -630,8 +627,7 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
 		scrapesFailed.Inc()
 	}
 	seriesAdded := 0
-	// cannot track the number of new time series when stale markers are disabled.
-	if !sw.Config.NoStaleMarkers && !areIdenticalSeries {
+	if !areIdenticalSeries {
 		// The returned value for seriesAdded may be bigger than the real number of added series
 		// if some series were removed during relabeling.
 		// This is a trade-off between performance and accuracy.
@@ -651,7 +647,7 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
 	sw.prevBodyLen = sbr.bodyLen
 	wc.reset()
 	writeRequestCtxPool.Put(wc)
-	if !sw.Config.NoStaleMarkers && !areIdenticalSeries {
+	if !areIdenticalSeries {
 		// Send stale markers for disappeared metrics with the real scrape timestamp
 		// in order to guarantee that query doesn't return data after this time for the disappeared metrics.
 		sw.sendStaleSeries(lastScrape, bodyString, realTimestamp, false)
@@ -790,6 +786,9 @@ func (sw *scrapeWork) sendStaleSeries(lastScrape, currScrape string, timestamp i
 	defer func() {
 		<-sendStaleSeriesConcurrencyLimitCh
 	}()
+	if sw.Config.NoStaleMarkers {
+		return
+	}
 	bodyString := lastScrape
 	if currScrape != "" {
 		bodyString = parser.GetRowsDiff(lastScrape, currScrape)
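Separately, the sendStaleSeriesConcurrencyLimitCh receive in the deferred function above is the usual buffered-channel semaphore pattern: a send acquires a slot (blocking while the buffer is full) and the deferred receive releases it, bounding how many goroutines generate stale markers at once. A minimal sketch with an illustrative channel name and capacity:

```go
package main

import (
	"fmt"
	"sync"
)

// limitCh is an illustrative semaphore: at most two calls run concurrently.
var limitCh = make(chan struct{}, 2)

func sendStaleSeries(target int, wg *sync.WaitGroup) {
	defer wg.Done()
	limitCh <- struct{}{} // acquire a slot; blocks while the buffer is full
	defer func() {
		<-limitCh // release the slot
	}()
	fmt.Println("generating stale markers for target", target)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go sendStaleSeries(i, &wg)
	}
	wg.Wait()
}
```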