mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-11-21 14:44:00 +00:00
7cb894a777
The main change is getting rid of interning of sample keys. It was discovered that for cases with many unique time series aggregated by vmagent, interned keys could grow up to hundreds of millions of objects. This has a negative impact on the following aspects:

1. It slows down garbage collection cycles, as the GC has to scan all in-use objects periodically. The higher the number of in-use objects, the longer a cycle takes and the more CPU it consumes.
2. It slows down the hot path of sample aggregation, where each key first needs to be looked up in the map.

The change makes the code more fragile, but is supposed to provide a performance optimization for heavily loaded vmagents with stream aggregation enabled.

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: Aliaksandr Valialkin <valyala@victoriametrics.com>
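To illustrate the change, here is a minimal sketch of interning versus plain cloning; internTable and internString are hypothetical stand-ins for an intern cache, not the actual VictoriaMetrics helpers. An interned key stays reachable through a process-wide table for the lifetime of the process, so the GC rescans it on every cycle, while a strings.Clone copy is referenced only by the aggregation map and becomes garbage as soon as its entry is flushed.

package main

import (
	"fmt"
	"strings"
	"sync"
)

// internTable is a hypothetical process-wide intern cache. Every unique key
// stored here stays reachable forever, so with hundreds of millions of unique
// series the GC must scan all of them on each cycle, and every push pays for
// an extra map lookup on the hot path.
var internTable sync.Map

func internString(s string) string {
	if v, ok := internTable.Load(s); ok {
		return v.(string)
	}
	s = strings.Clone(s)
	internTable.Store(s, s)
	return s
}

func main() {
	var aggrMap sync.Map

	// Before the change: the key is interned, pinning it in internTable
	// even after the aggregation entry is deleted on flush.
	aggrMap.Store(internString(`metric{instance="a"}`), 1.0)

	// After the change: the key is cloned straight into the aggregation
	// map, so deleting the entry on flush frees the key as well.
	aggrMap.LoadOrStore(strings.Clone(`metric{instance="b"}`), 2.0)

	aggrMap.Range(func(k, v any) bool {
		fmt.Println(k, v)
		return true
	})
}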
87 lines · 1.9 KiB · Go
package streamaggr

import (
	"strings"
	"sync"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
)

// lastAggrState calculates output=last, e.g. the last value over input samples.
type lastAggrState struct {
	m sync.Map
}

type lastStateValue struct {
	mu        sync.Mutex
	last      float64
	timestamp int64
	deleted   bool
}

func newLastAggrState() *lastAggrState {
	return &lastAggrState{}
}

func (as *lastAggrState) pushSamples(samples []pushSample) {
	for i := range samples {
		s := &samples[i]
		outputKey := getOutputKey(s.key)

	again:
		v, ok := as.m.Load(outputKey)
		if !ok {
			// The entry is missing in the map. Try creating it.
			v = &lastStateValue{
				last:      s.value,
				timestamp: s.timestamp,
			}
			vNew, loaded := as.m.LoadOrStore(strings.Clone(outputKey), v)
			if !loaded {
				// The new entry has been successfully created.
				continue
			}
			// Use the entry created by a concurrent goroutine.
			v = vNew
		}
		sv := v.(*lastStateValue)
		sv.mu.Lock()
		deleted := sv.deleted
		if !deleted {
			if s.timestamp >= sv.timestamp {
				sv.last = s.value
				sv.timestamp = s.timestamp
			}
		}
		sv.mu.Unlock()
		if deleted {
			// The entry has been deleted by the concurrent call to flushState.
			// Try obtaining and updating the entry again.
			goto again
		}
	}
}

func (as *lastAggrState) flushState(ctx *flushCtx, resetState bool) {
	currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000
	m := &as.m
	m.Range(func(k, v interface{}) bool {
		if resetState {
			// Atomically delete the entry from the map, so new entry is created for the next flush.
			m.Delete(k)
		}

		sv := v.(*lastStateValue)
		sv.mu.Lock()
		last := sv.last
		if resetState {
			// Mark the entry as deleted, so it won't be updated anymore by concurrent pushSample() calls.
			sv.deleted = true
		}
		sv.mu.Unlock()

		key := k.(string)
		ctx.appendSeries(key, "last", currentTimeMsec, last)
		return true
	})
}
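The fragility mentioned in the commit message comes from the race between pushSamples and flushState: flushState deletes an entry from the map and only then marks it deleted under the lock, so a concurrent pushSamples call may have already loaded the stale entry. The goto-again retry is what keeps such a sample from being silently dropped. Below is a self-contained sketch of the same pattern with illustrative names (entry, push, and flush are not part of the package):

package main

import (
	"fmt"
	"strings"
	"sync"
)

// entry mirrors the deleted-flag idea from lastStateValue.
type entry struct {
	mu      sync.Mutex
	last    float64
	deleted bool
}

// push retries when it loses the race with flush, like the goto-again loop
// in pushSamples: a value must never be written into an entry that flush has
// already marked deleted, or it would be lost.
func push(m *sync.Map, key string, value float64) {
again:
	v, ok := m.Load(key)
	if !ok {
		e := &entry{last: value}
		vNew, loaded := m.LoadOrStore(strings.Clone(key), e)
		if !loaded {
			return
		}
		v = vNew
	}
	e := v.(*entry)
	e.mu.Lock()
	deleted := e.deleted
	if !deleted {
		e.last = value
	}
	e.mu.Unlock()
	if deleted {
		goto again
	}
}

// flush deletes the map entry first, then marks it deleted under the lock,
// so a concurrent push either sees the entry before deletion (and updates it
// in time for this flush) or retries against a fresh entry.
func flush(m *sync.Map) {
	m.Range(func(k, v any) bool {
		m.Delete(k)
		e := v.(*entry)
		e.mu.Lock()
		last := e.last
		e.deleted = true
		e.mu.Unlock()
		fmt.Println(k, "last =", last)
		return true
	})
}

func main() {
	var m sync.Map
	push(&m, "a", 1)
	push(&m, "a", 2)
	flush(&m) // prints: a last = 2
	push(&m, "a", 3)
	flush(&m) // prints: a last = 3
}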