app/vmagent/remotewrite: limit memory usage when big scrape blocks are pushed to remote storage

This commit is contained in:
Aliaksandr Valialkin 2020-02-28 18:57:45 +02:00
parent 6282b29a44
commit cc39c9d74b
2 changed files with 15 additions and 9 deletions

View file

@@ -50,12 +50,11 @@ func resetRelabel() {
 	prcs = nil
 }
 
-func (rctx *relabelCtx) applyRelabeling(wr *prompbmarshal.WriteRequest) {
+func (rctx *relabelCtx) applyRelabeling(tss []prompbmarshal.TimeSeries) []prompbmarshal.TimeSeries {
 	if len(extraLabels) == 0 && len(prcs) == 0 {
 		// Nothing to change.
-		return
+		return tss
 	}
-	tss := wr.Timeseries
 	tssDst := tss[:0]
 	labels := rctx.labels[:0]
 	for i := range tss {
@@ -83,7 +82,7 @@ func (rctx *relabelCtx) applyRelabeling(wr *prompbmarshal.WriteRequest) {
 		})
 	}
 	rctx.labels = labels
-	wr.Timeseries = tssDst
+	return tssDst
 }
 
 type relabelCtx struct {

View file

@@ -105,11 +105,18 @@ func Stop() {
 // Each timeseries in wr.Timeseries must contain one sample.
 func Push(wr *prompbmarshal.WriteRequest) {
 	rctx := relabelCtxPool.Get().(*relabelCtx)
-	rctx.applyRelabeling(wr)
-
-	idx := atomic.AddUint64(&pssNextIdx, 1) % uint64(len(pss))
-	pss[idx].Push(wr.Timeseries)
-
+	tss := wr.Timeseries
+	for len(tss) > 0 {
+		// Process big tss in smaller blocks in order to reduce maxmimum memory usage
+		tssBlock := tss
+		if len(tssBlock) > maxRowsPerBlock {
+			tssBlock = tss[:maxRowsPerBlock]
+			tss = tss[maxRowsPerBlock:]
+		}
+		tssBlock = rctx.applyRelabeling(tssBlock)
+		idx := atomic.AddUint64(&pssNextIdx, 1) % uint64(len(pss))
+		pss[idx].Push(tssBlock)
+	}
 	rctx.reset()
 	relabelCtxPool.Put(rctx)
 }