lib/streamaggr: remove resetState arg at dedupAggr.flush(), since it is always set to true in production

This commit is contained in:
Aliaksandr Valialkin 2024-06-10 14:29:45 +02:00
parent 8d95522529
commit f45d02a243
No known key found for this signature in database
GPG key ID: 52C003EE2BCDB9EB
5 changed files with 9 additions and 23 deletions

View file

@ -4,8 +4,9 @@ import (
"sync"
"unsafe"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/cespare/xxhash/v2"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
)
const dedupAggrShardsCount = 128
@ -113,7 +114,7 @@ func (ctx *dedupFlushCtx) reset() {
ctx.samples = ctx.samples[:0]
}
func (da *dedupAggr) flush(f func(samples []pushSample), resetState bool) {
func (da *dedupAggr) flush(f func(samples []pushSample)) {
var wg sync.WaitGroup
for i := range da.shards {
flushConcurrencyCh <- struct{}{}
@ -125,7 +126,7 @@ func (da *dedupAggr) flush(f func(samples []pushSample), resetState bool) {
}()
ctx := getDedupFlushCtx()
shard.flush(ctx, f, resetState)
shard.flush(ctx, f)
putDedupFlushCtx(ctx)
}(&da.shards[i])
}
@ -193,11 +194,11 @@ func (das *dedupAggrShard) pushSamples(samples []pushSample) {
}
}
func (das *dedupAggrShard) flush(ctx *dedupFlushCtx, f func(samples []pushSample), resetState bool) {
func (das *dedupAggrShard) flush(ctx *dedupFlushCtx, f func(samples []pushSample)) {
das.mu.Lock()
m := das.m
if resetState && len(m) > 0 {
if len(m) > 0 {
das.m = make(map[string]dedupAggrSample, len(m))
}

View file

@ -39,7 +39,7 @@ func TestDedupAggrSerial(t *testing.T) {
}
mu.Unlock()
}
da.flush(flushSamples, true)
da.flush(flushSamples)
if !reflect.DeepEqual(expectedSamplesMap, flushedSamplesMap) {
t.Fatalf("unexpected samples;\ngot\n%v\nwant\n%v", flushedSamplesMap, expectedSamplesMap)

View file

@ -4,7 +4,6 @@ import (
"fmt"
"sync/atomic"
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)
@ -17,20 +16,6 @@ func BenchmarkDedupAggr(b *testing.B) {
}
}
func BenchmarkDedupAggrFlushSerial(b *testing.B) {
as := newTotalAggrState(time.Hour, true, true)
benchSamples := newBenchSamples(100_000)
da := newDedupAggr()
da.pushSamples(benchSamples)
b.ResetTimer()
b.ReportAllocs()
b.SetBytes(int64(len(benchSamples)))
for i := 0; i < b.N; i++ {
da.flush(as.pushSamples, false)
}
}
func benchmarkDedupAggr(b *testing.B, samplesPerPush int) {
const loops = 100
benchSamples := newBenchSamples(samplesPerPush)

View file

@ -166,7 +166,7 @@ func (d *Deduplicator) flush(pushFunc PushFunc, dedupInterval time.Duration) {
ctx.labels = labels
ctx.samples = samples
putDeduplicatorFlushCtx(ctx)
}, true)
})
duration := time.Since(startTime)
d.dedupFlushDuration.Update(duration.Seconds())

View file

@ -747,7 +747,7 @@ func (a *aggregator) dedupFlush(dedupInterval time.Duration) {
startTime := time.Now()
a.da.flush(a.pushSamples, true)
a.da.flush(a.pushSamples)
d := time.Since(startTime)
a.dedupFlushDuration.Update(d.Seconds())