Repository: https://github.com/VictoriaMetrics/VictoriaMetrics.git
app/vmselect/netstorage: vary batch size for data unpacking depending on the available CPU cores
This should reduce contention on the unpack work channel on systems with a high number of CPU cores.
Parent: 8d9eb5f808
Commit: 8adba82c02

1 changed file with 1 addition and 1 deletion
@@ -246,7 +246,7 @@ func unpackWorker() {
 // unpackBatchSize is the maximum number of blocks that may be unpacked at once by a single goroutine.
 //
 // This batch is needed in order to reduce contention for upackWorkCh in multi-CPU system.
-const unpackBatchSize = 16
+var unpackBatchSize = 8 * runtime.GOMAXPROCS(-1)
 
 // Unpack unpacks pts to dst.
 func (pts *packedTimeseries) Unpack(dst *Result, tr storage.TimeRange, fetchData bool) error {
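For readers unfamiliar with the pattern being tuned here, below is a minimal, hypothetical sketch of batched work distribution over a shared channel, the idea behind unpackBatchSize. It is not the actual netstorage code: the workItem type, channel layout, and producer loop are assumptions made for illustration. What it demonstrates is that each receive on the shared channel hands a worker a whole batch, so scaling the batch size with runtime.GOMAXPROCS(-1) cuts the number of channel operations per block as the number of workers grows.

// Minimal sketch (not the VictoriaMetrics implementation) of batched work
// distribution over a shared channel. Workers receive whole batches, so the
// number of channel operations per item shrinks as the batch size grows.
package main

import (
	"runtime"
	"sync"
)

// Mirrors the change in the diff: scale the batch size with the number of
// CPU cores, since more cores means more workers contending on the channel.
var unpackBatchSize = 8 * runtime.GOMAXPROCS(-1)

// workItem is a stand-in for a block to be unpacked (hypothetical type).
type workItem struct{ id int }

func main() {
	numWorkers := runtime.GOMAXPROCS(-1)
	workCh := make(chan []workItem, numWorkers)

	var wg sync.WaitGroup
	for i := 0; i < numWorkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// A single receive yields up to unpackBatchSize items, so the
			// workers synchronize on workCh far less often than they would
			// if each item were sent individually.
			for batch := range workCh {
				for range batch {
					// unpack one block here
				}
			}
		}()
	}

	// Producer: group items into batches before handing them to the workers.
	const totalItems = 10000
	batch := make([]workItem, 0, unpackBatchSize)
	for i := 0; i < totalItems; i++ {
		batch = append(batch, workItem{id: i})
		if len(batch) >= unpackBatchSize {
			workCh <- batch
			batch = make([]workItem, 0, unpackBatchSize)
		}
	}
	if len(batch) > 0 {
		workCh <- batch
	}
	close(workCh)
	wg.Wait()
}

The arithmetic behind the change: with the old constant, every receive delivered 16 blocks regardless of core count; after this commit a 32-core machine (GOMAXPROCS(-1) = 32) receives batches of 8 * 32 = 256 blocks, roughly 16x fewer channel operations for the same amount of work, which is where the reduced contention comes from.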