app/vmselect/netstorage: vary batch size for data unpacking depending on the available CPU cores

This should reduce contention on the channel with unpack work for systems with a high number of CPU cores.
commit c9f5c5623f
parent 4ce1368e4b
1 changed file with 1 addition and 1 deletion
@@ -251,7 +251,7 @@ func unpackWorker() {
 // unpackBatchSize is the maximum number of blocks that may be unpacked at once by a single goroutine.
 //
 // This batch is needed in order to reduce contention for upackWorkCh in multi-CPU system.
-const unpackBatchSize = 16
+var unpackBatchSize = 8 * runtime.GOMAXPROCS(-1)
 
 // Unpack unpacks pts to dst.
 func (pts *packedTimeseries) Unpack(tbf *tmpBlocksFile, dst *Result, tr storage.TimeRange, fetchData bool, at *auth.Token) error {
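
For context, the following is a minimal, self-contained sketch of the batching pattern this commit tunes. It is not the actual VictoriaMetrics code: the work channel, worker pool, and "blocks" (plain ints here) are simplified stand-ins. It shows why scaling the batch size with runtime.GOMAXPROCS(-1) reduces channel contention: each send on the work channel hands a worker a whole batch, so the number of channel operations per block shrinks as the batch grows.

package main

import (
	"fmt"
	"runtime"
	"sync"
)

// unpackBatchSize mirrors the change in the diff above: instead of a fixed
// constant, the batch size scales with the number of available CPU cores.
// GOMAXPROCS(-1) reads the current setting without changing it.
var unpackBatchSize = 8 * runtime.GOMAXPROCS(-1)

func main() {
	// Simulated stream of blocks to unpack (stand-ins for real work items).
	blocks := make([]int, 1000)
	for i := range blocks {
		blocks[i] = i
	}

	// Each channel send carries a whole batch, so the number of sends -
	// and hence the contention on workCh - drops by a factor of
	// unpackBatchSize compared to sending blocks one at a time.
	workCh := make(chan []int, runtime.GOMAXPROCS(-1))
	var wg sync.WaitGroup
	for i := 0; i < runtime.GOMAXPROCS(-1); i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for batch := range workCh {
				for range batch {
					// A real worker would unpack a single block here.
				}
			}
		}()
	}

	// Producer: slice the work into batches and send each batch once.
	for len(blocks) > 0 {
		n := unpackBatchSize
		if n > len(blocks) {
			n = len(blocks)
		}
		workCh <- blocks[:n]
		blocks = blocks[n:]
	}
	close(workCh)
	wg.Wait()
	fmt.Printf("processed all blocks with batch size %d\n", unpackBatchSize)
}

Presumably the motivation for dropping the old constant of 16 is that with many cores, many workers return to the shared channel often; tying the batch size to GOMAXPROCS keeps per-worker channel traffic roughly constant as the core count grows.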