Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2024-11-21 14:44:00 +00:00)
lib/promscrape: return back the logic for flushing big buffers to storage from the commit 3fd8653b40
This should reduce memory usage when vmagent scrapes targets with a big number of metrics and `-promscrape.streamParse` isn't enabled
parent b88806ecbf
commit 7f6f350ee1
1 changed file with 13 additions and 1 deletion
@@ -294,10 +294,22 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
 	srcRows := wc.rows.Rows
 	samplesScraped := len(srcRows)
 	scrapedSamples.Update(float64(samplesScraped))
+	samplesPostRelabeling := 0
 	for i := range srcRows {
 		sw.addRowToTimeseries(wc, &srcRows[i], scrapeTimestamp, true)
+		if len(wc.labels) > 40000 {
+			// Limit the maximum size of wc.writeRequest.
+			// This should reduce memory usage when scraping targets with millions of metrics and/or labels.
+			// For example, when scraping /federate handler from Prometheus - see https://prometheus.io/docs/prometheus/latest/federation/
+			samplesPostRelabeling += len(wc.writeRequest.Timeseries)
+			sw.updateSeriesAdded(wc)
+			startTime := time.Now()
+			sw.PushData(&wc.writeRequest)
+			pushDataDuration.UpdateDuration(startTime)
+			wc.resetNoRows()
+		}
 	}
-	samplesPostRelabeling := len(wc.writeRequest.Timeseries)
+	samplesPostRelabeling += len(wc.writeRequest.Timeseries)
 	if sw.Config.SampleLimit > 0 && samplesPostRelabeling > sw.Config.SampleLimit {
 		wc.resetNoRows()
 		up = 0
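For readers outside the VictoriaMetrics codebase, here is a minimal, self-contained Go sketch of the flush-on-threshold pattern this commit restores. Every name in it (sample, writeContext, pushData, maxBufferedSamples, processScrape) is a hypothetical stand-in rather than the real lib/promscrape API; it only illustrates why flushing the buffered write request once it grows past a limit keeps memory bounded for targets exposing huge numbers of series.

// A minimal, self-contained sketch of the flush-on-threshold pattern.
// All identifiers are hypothetical stand-ins, not the actual scrapeWork code.
package main

import "fmt"

// sample stands in for one relabeled time series ready to be written.
type sample struct {
	name  string
	value float64
}

// writeContext mimics the role of the scrape write context: it accumulates
// samples until the caller flushes them to storage.
type writeContext struct {
	pending []sample
}

func (wc *writeContext) add(s sample) { wc.pending = append(wc.pending, s) }

// reset drops the buffered samples while keeping the allocated capacity,
// similar in spirit to wc.resetNoRows() in the diff above.
func (wc *writeContext) reset() { wc.pending = wc.pending[:0] }

// pushData stands in for sw.PushData; here it only reports the batch size.
func pushData(batch []sample) {
	fmt.Printf("pushed %d samples\n", len(batch))
}

// maxBufferedSamples plays the role of the `len(wc.labels) > 40000` check:
// once the buffer grows past this size it is flushed mid-scrape instead of
// being kept in memory until the whole target response is processed.
const maxBufferedSamples = 40000

// processScrape buffers rows and flushes the buffer whenever it gets too
// big, returning the total number of samples after relabeling so a
// sample-limit check can still see the full count.
func processScrape(rows []sample) int {
	wc := &writeContext{}
	samplesPostRelabeling := 0
	for _, r := range rows {
		wc.add(r)
		if len(wc.pending) > maxBufferedSamples {
			// Flush early to cap the size of the in-memory write request,
			// mirroring the block the commit adds inside the scrape loop.
			samplesPostRelabeling += len(wc.pending)
			pushData(wc.pending)
			wc.reset()
		}
	}
	// Account for and flush whatever remains after the loop.
	samplesPostRelabeling += len(wc.pending)
	pushData(wc.pending)
	return samplesPostRelabeling
}

func main() {
	rows := make([]sample, 100000)
	for i := range rows {
		rows[i] = sample{name: fmt.Sprintf("metric_%d", i), value: float64(i)}
	}
	fmt.Println("total samples:", processScrape(rows))
}

Note how samplesPostRelabeling is accumulated across flushes: in the real diff this is what keeps the sw.Config.SampleLimit check after the loop working on the full post-relabeling count even though the write request has already been partially pushed.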