app/vmagent/remotewrite: limit memory usage when big scrape blocks are pushed to remote storage

This commit is contained in:
Aliaksandr Valialkin 2020-02-28 18:57:45 +02:00
parent 6282b29a44
commit cc39c9d74b
2 changed files with 15 additions and 9 deletions

View file

@@ -50,12 +50,11 @@ func resetRelabel() {
prcs = nil
}
func (rctx *relabelCtx) applyRelabeling(wr *prompbmarshal.WriteRequest) {
func (rctx *relabelCtx) applyRelabeling(tss []prompbmarshal.TimeSeries) []prompbmarshal.TimeSeries {
if len(extraLabels) == 0 && len(prcs) == 0 {
// Nothing to change.
return
return tss
}
tss := wr.Timeseries
tssDst := tss[:0]
labels := rctx.labels[:0]
for i := range tss {
@@ -83,7 +82,7 @@ func (rctx *relabelCtx) applyRelabeling(wr *prompbmarshal.WriteRequest) {
})
}
rctx.labels = labels
wr.Timeseries = tssDst
return tssDst
}
type relabelCtx struct {

View file

@@ -105,11 +105,18 @@ func Stop() {
// Each timeseries in wr.Timeseries must contain one sample.
func Push(wr *prompbmarshal.WriteRequest) {
rctx := relabelCtxPool.Get().(*relabelCtx)
rctx.applyRelabeling(wr)
idx := atomic.AddUint64(&pssNextIdx, 1) % uint64(len(pss))
pss[idx].Push(wr.Timeseries)
tss := wr.Timeseries
for len(tss) > 0 {
// Process big tss in smaller blocks in order to reduce maximum memory usage
tssBlock := tss
if len(tssBlock) > maxRowsPerBlock {
tssBlock = tss[:maxRowsPerBlock]
tss = tss[maxRowsPerBlock:]
}
tssBlock = rctx.applyRelabeling(tssBlock)
idx := atomic.AddUint64(&pssNextIdx, 1) % uint64(len(pss))
pss[idx].Push(tssBlock)
}
rctx.reset()
relabelCtxPool.Put(rctx)
}