lib/promscrape: bring back the logic for flushing big buffers to storage from commit 3fd8653b40

This should reduce memory usage when vmagent scrapes targets with a big number of metrics and `-promscrape.streamParse` isn't enabled.
Aliaksandr Valialkin 2021-03-14 22:24:03 +02:00
parent 894246176f
commit 33cd6c26d3


@@ -294,10 +294,22 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
 	srcRows := wc.rows.Rows
 	samplesScraped := len(srcRows)
 	scrapedSamples.Update(float64(samplesScraped))
+	samplesPostRelabeling := 0
 	for i := range srcRows {
 		sw.addRowToTimeseries(wc, &srcRows[i], scrapeTimestamp, true)
+		if len(wc.labels) > 40000 {
+			// Limit the maximum size of wc.writeRequest.
+			// This should reduce memory usage when scraping targets with millions of metrics and/or labels.
+			// For example, when scraping /federate handler from Prometheus - see https://prometheus.io/docs/prometheus/latest/federation/
+			samplesPostRelabeling += len(wc.writeRequest.Timeseries)
+			sw.updateSeriesAdded(wc)
+			startTime := time.Now()
+			sw.PushData(&wc.writeRequest)
+			pushDataDuration.UpdateDuration(startTime)
+			wc.resetNoRows()
+		}
 	}
-	samplesPostRelabeling := len(wc.writeRequest.Timeseries)
+	samplesPostRelabeling += len(wc.writeRequest.Timeseries)
 	if sw.Config.SampleLimit > 0 && samplesPostRelabeling > sw.Config.SampleLimit {
 		wc.resetNoRows()
 		up = 0
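
For context, here is a minimal, self-contained Go sketch of the pattern this diff restores: accumulate parsed series into a pending batch and flush it downstream as soon as the number of buffered labels crosses a fixed threshold, so peak memory stays bounded even when a single scrape returns millions of samples. This is not VictoriaMetrics code; the names pendingBatch, maxPendingLabels and pushData are illustrative stand-ins for wc.writeRequest, the 40000-label check and sw.PushData.

// flushsketch.go - illustrative only, assumptions noted above.
package main

import "fmt"

type series struct {
	labels []string
	value  float64
}

// pendingBatch stands in for the write-request buffer built during a scrape.
type pendingBatch struct {
	series      []series
	labelsTotal int // total number of labels buffered so far
}

// maxPendingLabels mirrors the `len(wc.labels) > 40000` check in the diff:
// once this many labels are buffered, the batch is flushed early.
const maxPendingLabels = 40000

// pushData stands in for sw.PushData: it sends the batch downstream and
// resets the buffers so they can be reused for the rest of the scrape.
func pushData(b *pendingBatch) {
	fmt.Printf("flushing %d series (%d labels)\n", len(b.series), b.labelsTotal)
	b.series = b.series[:0]
	b.labelsTotal = 0
}

func main() {
	var b pendingBatch
	earlyFlushes := 0
	for i := 0; i < 100000; i++ {
		s := series{labels: []string{"__name__", "job"}, value: float64(i)}
		b.series = append(b.series, s)
		b.labelsTotal += len(s.labels)
		if b.labelsTotal > maxPendingLabels {
			pushData(&b) // early flush keeps peak memory bounded
			earlyFlushes++
		}
	}
	pushData(&b) // final flush for the remainder
	fmt.Println("early flushes:", earlyFlushes)
}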