app/vmselect/promql: follow up for 433fff0006

Aliaksandr Valialkin 2021-03-09 12:46:11 +02:00
parent 28e450cd7c
commit 942890b1bb
2 changed files with 13 additions and 17 deletions


@@ -526,27 +526,22 @@ func vmrangeBucketsToLE(tss []*timeseries) []*timeseries {
 			xsPrev = xs
 			continue
 		}
-		if xs.start != xsPrev.end {
-			// check for duplicates at the start of bucket.
-			// in case of duplicate following le already exists.
-			// no need to add new one with zero values.
-			if _, ok := uniqTs[xs.startStr]; !ok {
-				uniqTs[xs.startStr] = xs.ts
-				xssNew = append(xssNew, x{
-					endStr: xs.startStr,
-					end:    xs.start,
-					ts:     copyTS(ts, xs.startStr),
-				})
-			}
+		if xs.start != xsPrev.end && uniqTs[xs.startStr] == nil {
+			uniqTs[xs.startStr] = xs.ts
+			xssNew = append(xssNew, x{
+				endStr: xs.startStr,
+				end:    xs.start,
+				ts:     copyTS(ts, xs.startStr),
+			})
 		}
 		ts.MetricName.AddTag("le", xs.endStr)
-		if prevTs, ok := uniqTs[xs.endStr]; !ok {
+		prevTs := uniqTs[xs.endStr]
+		if prevTs != nil {
+			// the end of the current bucket is not unique, need to merge it with the existing bucket.
+			mergeNonOverlappingTimeseries(prevTs, xs.ts)
+		} else {
 			xssNew = append(xssNew, xs)
 			uniqTs[xs.endStr] = xs.ts
-		} else {
-			// end of current bucket not uniq,
-			// need to merge it with existing bucket.
-			mergeNonOverlappingTimeseries(prevTs, xs.ts)
 		}
 		xsPrev = xs
 	}
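
For illustration, below is a minimal self-contained Go sketch of the deduplication pattern the new code follows: a map keyed by the bucket end (the `le` value) decides whether a bucket becomes a new output series or is merged into the series already emitted for that key. The `series` type and the `mergeNonOverlapping`/`dedupBuckets` helpers are simplified stand-ins for the real `timeseries` and `mergeNonOverlappingTimeseries` code, not the actual VictoriaMetrics implementation.

package main

import "fmt"

// series is a hypothetical, simplified stand-in for the timeseries type used by
// vmrangeBucketsToLE; only the fields needed to show the dedup pattern are kept.
type series struct {
	le     string    // value of the `le` label, i.e. the bucket end
	values []float64 // one value per timestamp; gaps are simplified to 0 here
}

// mergeNonOverlapping copies values from src into dst where dst still holds the
// placeholder 0, mimicking how a duplicate bucket is merged into the existing one
// instead of being emitted twice (which previously caused `duplicate time series`).
// Both slices are assumed to have the same length (one entry per shared timestamp).
func mergeNonOverlapping(dst, src *series) {
	for i, v := range src.values {
		if dst.values[i] == 0 {
			dst.values[i] = v
		}
	}
}

// dedupBuckets keeps a single series per distinct `le` value, merging duplicates
// in the same way the patched vmrangeBucketsToLE does with its uniqTs map.
func dedupBuckets(buckets []*series) []*series {
	uniq := make(map[string]*series) // le -> series already added to the output
	var out []*series
	for _, b := range buckets {
		if prev := uniq[b.le]; prev != nil {
			// The end of the current bucket is not unique: merge it into the existing series.
			mergeNonOverlapping(prev, b)
			continue
		}
		uniq[b.le] = b
		out = append(out, b)
	}
	return out
}

func main() {
	buckets := []*series{
		{le: "1", values: []float64{5, 0}},
		{le: "2", values: []float64{7, 0}},
		{le: "1", values: []float64{0, 6}}, // duplicate `le` coming from a different vmrange bucket set
	}
	for _, b := range dedupBuckets(buckets) {
		fmt.Println(b.le, b.values)
	}
	// Output:
	// 1 [5 6]
	// 2 [7 0]
}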


@@ -12,6 +12,7 @@
 * BUGFIX: vmagent: prevent from high CPU usage bug during failing scrapes with small `scrape_timeout` (less than a few seconds).
 * BUGFIX: vmagent: reduce memory usage when Kubernetes service discovery is used in big number of distinct jobs by sharing the cache. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1113
+* BUGFIX: avoid `duplicate time series` error if `prometheus_buckets()` covers a time range with distinct set of buckets.
 * BUGFIX: prevent exponent overflow when processing extremely small values close to zero such as `2.964393875E-314`. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1114