mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-11-21 14:44:00 +00:00
app/vmselect/netstorage: reduce memory allocations when unpacking time series data by using a pool for unpackWork entries
This should slightly reduce load on GC when processing queries that touch a big number of time series. Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/646 according to the memory profile provided there.
This commit is contained in:
parent
31ae5911a8
commit
dfb113f175
1 changed file with 34 additions and 7 deletions
|
@ -13,6 +13,7 @@ import (
|
|||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
@ -172,10 +173,37 @@ type unpackWork struct {
|
|||
br storage.BlockRef
|
||||
tr storage.TimeRange
|
||||
fetchData bool
|
||||
doneCh chan error
|
||||
sb *sortBlock
|
||||
doneCh chan error
|
||||
}
|
||||
|
||||
func (upw *unpackWork) reset() {
|
||||
upw.br = storage.BlockRef{}
|
||||
upw.tr = storage.TimeRange{}
|
||||
upw.fetchData = false
|
||||
upw.sb = nil
|
||||
if n := len(upw.doneCh); n > 0 {
|
||||
logger.Panicf("BUG: upw.doneCh must be empty; it contains %d items now", n)
|
||||
}
|
||||
}
|
||||
|
||||
func getUnpackWork() *unpackWork {
|
||||
v := unpackWorkPool.Get()
|
||||
if v != nil {
|
||||
return v.(*unpackWork)
|
||||
}
|
||||
return &unpackWork{
|
||||
doneCh: make(chan error, 1),
|
||||
}
|
||||
}
|
||||
|
||||
func putUnpackWork(upw *unpackWork) {
|
||||
upw.reset()
|
||||
unpackWorkPool.Put(upw)
|
||||
}
|
||||
|
||||
// unpackWorkPool caches unpackWork entries between queries to reduce
// memory allocations and GC load when unpacking time series data.
var unpackWorkPool sync.Pool
|
||||
|
||||
func init() {
|
||||
for i := 0; i < gomaxprocs; i++ {
|
||||
go unpackWorker()
|
||||
|
@ -206,12 +234,10 @@ func (pts *packedTimeseries) Unpack(dst *Result, tr storage.TimeRange, fetchData
|
|||
// Feed workers with work
|
||||
upws := make([]*unpackWork, len(pts.brs))
|
||||
for i, br := range pts.brs {
|
||||
upw := &unpackWork{
|
||||
br: br,
|
||||
tr: tr,
|
||||
fetchData: fetchData,
|
||||
doneCh: make(chan error, 1),
|
||||
}
|
||||
upw := getUnpackWork()
|
||||
upw.br = br
|
||||
upw.tr = tr
|
||||
upw.fetchData = fetchData
|
||||
unpackWorkCh <- upw
|
||||
upws[i] = upw
|
||||
}
|
||||
|
@ -230,6 +256,7 @@ func (pts *packedTimeseries) Unpack(dst *Result, tr storage.TimeRange, fetchData
|
|||
} else if upw.sb != nil {
|
||||
putSortBlock(upw.sb)
|
||||
}
|
||||
putUnpackWork(upw)
|
||||
}
|
||||
if firstErr != nil {
|
||||
return firstErr
|
||||
|
|
Loading…
Reference in a new issue