Prepare for PR - refactoring

Alexander Marshalov 2023-12-12 16:02:12 +01:00
parent 715bf3af82
commit fb9a9d1463
No known key found for this signature in database
9 changed files with 12 additions and 895 deletions

[binary image added: 355 KiB]

[binary image added: 806 KiB]

@@ -696,3 +696,15 @@ so every `vmagent` aggregates data into distinct set of time series. These time
For example, if `vmagent` instances run in Docker or Kubernetes, then you can refer to the `POD_NAME` or `HOSTNAME` environment variables
as a unique label value for each `vmagent`: `-remoteWrite.label='vmagent=%{HOSTNAME}'`. See [these docs](https://docs.victoriametrics.com/#environment-variables)
on how to refer to environment variables in VictoriaMetrics components.

## Debugging

It is possible to see the current state of streaming aggregation via the `/stream-agg` page
of [vmagent](https://docs.victoriametrics.com/vmagent.html) and [single-node VictoriaMetrics](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html).
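The page can also be fetched programmatically, e.g. for debugging from scripts. A minimal Go sketch, assuming a vmagent running locally on its default `localhost:8429` (the address is an assumption; only the `/stream-agg` path comes from this section):

```go
package main

import (
    "fmt"
    "io"
    "net/http"
)

func main() {
    // Fetch the stream aggregation debug page; localhost:8429 is assumed
    // to be the -httpListenAddr of a locally running vmagent.
    resp, err := http.Get("http://localhost:8429/stream-agg")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    body, err := io.ReadAll(resp.Body)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(body)) // HTML with the list of active aggregations
}
```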
On this page you can see the list of active aggregations for each remote write:

<img alt="stream aggregation -> aggregations list" src="stream-aggregation-ui-1.png">

By clicking on the output name you can see the list of time series for this output with their current state:


@@ -1,131 +0,0 @@
package streamaggr

import (
    "sync"
    "sync/atomic"
    "time"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
)

// countSamplesTotalAggrState calculates output=count_samples_total, i.e. the cumulative count of input samples.
type countSamplesTotalAggrState struct {
    m                 sync.Map
    intervalSecs      uint64
    stalenessSecs     uint64
    lastPushTimestamp atomic.Uint64
}

type countSamplesTotalStateValue struct {
    mu             sync.Mutex
    n              uint64
    deleted        bool
    deleteDeadline uint64
}

func newCountSamplesTotalAggrState(interval time.Duration, stalenessInterval time.Duration) *countSamplesTotalAggrState {
    return &countSamplesTotalAggrState{
        intervalSecs:  roundDurationToSecs(interval),
        stalenessSecs: roundDurationToSecs(stalenessInterval),
    }
}

func (as *countSamplesTotalAggrState) pushSample(_, outputKey string, _ float64) {
    currentTime := fasttime.UnixTimestamp()
    deleteDeadline := currentTime + as.stalenessSecs

again:
    v, ok := as.m.Load(outputKey)
    if !ok {
        // The entry is missing in the map. Try creating it.
        v = &countSamplesTotalStateValue{
            n: 1,
        }
        vNew, loaded := as.m.LoadOrStore(outputKey, v)
        if !loaded {
            // The new entry has been successfully created.
            return
        }
        // Use the entry created by a concurrent goroutine.
        v = vNew
    }
    sv := v.(*countSamplesTotalStateValue)
    sv.mu.Lock()
    deleted := sv.deleted
    if !deleted {
        sv.n++
        sv.deleteDeadline = deleteDeadline
    }
    sv.mu.Unlock()
    if deleted {
        // The entry has been deleted by the concurrent call to appendSeriesForFlush.
        // Try obtaining and updating the entry again.
        goto again
    }
}

func (as *countSamplesTotalAggrState) removeOldEntries(currentTime uint64) {
    m := &as.m
    m.Range(func(k, v interface{}) bool {
        sv := v.(*countSamplesTotalStateValue)
        sv.mu.Lock()
        deleted := currentTime > sv.deleteDeadline
        if deleted {
            // Mark the current entry as deleted
            sv.deleted = deleted
        }
        sv.mu.Unlock()
        if deleted {
            m.Delete(k)
        }
        return true
    })
}

func (as *countSamplesTotalAggrState) appendSeriesForFlush(ctx *flushCtx) {
    currentTime := fasttime.UnixTimestamp()
    currentTimeMsec := int64(currentTime) * 1000

    as.removeOldEntries(currentTime)

    m := &as.m
    m.Range(func(k, v interface{}) bool {
        sv := v.(*countSamplesTotalStateValue)
        sv.mu.Lock()
        n := sv.n
        sv.mu.Unlock()
        key := k.(string)
        ctx.appendSeries(key, as.getOutputName(), currentTimeMsec, float64(n))
        return true
    })
    as.lastPushTimestamp.Store(currentTime)
}

func (as *countSamplesTotalAggrState) getOutputName() string {
    return "count_samples_total"
}

func (as *countSamplesTotalAggrState) getStateRepresentation(suffix string) aggrStateRepresentation {
    metrics := make([]aggrStateRepresentationMetric, 0)
    as.m.Range(func(k, v any) bool {
        value := v.(*countSamplesTotalStateValue)
        value.mu.Lock()
        defer value.mu.Unlock()
        if value.deleted {
            return true
        }
        metrics = append(metrics, aggrStateRepresentationMetric{
            metric:       getLabelsStringFromKey(k.(string), suffix, as.getOutputName()),
            currentValue: float64(value.n),
            samplesCount: value.n,
        })
        return true
    })
    return aggrStateRepresentation{
        intervalSecs:      as.intervalSecs,
        lastPushTimestamp: as.lastPushTimestamp.Load(),
        metrics:           metrics,
    }
}
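Note that, unlike a plain `count_samples` output, this state is never reset in `appendSeriesForFlush`: `sv.n` is read but kept, so the emitted value grows monotonically like a Prometheus counter. A minimal single-goroutine sketch of just that semantic (hypothetical names, no staleness or concurrency handling):

```go
package main

import "fmt"

// countSamplesTotal models the cumulative semantics of the state above:
// flush reports the per-series counters without resetting them.
type countSamplesTotal struct {
    n map[string]uint64
}

func (c *countSamplesTotal) pushSample(outputKey string) {
    c.n[outputKey]++
}

func (c *countSamplesTotal) flush() map[string]uint64 {
    out := make(map[string]uint64, len(c.n))
    for k, v := range c.n {
        out[k] = v
    }
    return out
}

func main() {
    c := &countSamplesTotal{n: make(map[string]uint64)}
    c.pushSample("foo")
    c.pushSample("foo")
    fmt.Println(c.flush()) // map[foo:2]
    c.pushSample("foo")
    fmt.Println(c.flush()) // map[foo:3]: the count survives flushes
}
```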


@@ -1,156 +0,0 @@
package streamaggr

import (
    "sync"
    "sync/atomic"
    "time"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
)

// increasePureAggrState calculates output=increase_pure, i.e. the pure increase over input counters.
type increasePureAggrState struct {
    m                 sync.Map
    intervalSecs      uint64
    stalenessSecs     uint64
    lastPushTimestamp atomic.Uint64
}

type increasePureStateValue struct {
    mu             sync.Mutex
    lastValues     map[string]*lastValueState
    total          float64
    samplesCount   uint64
    deleteDeadline uint64
    deleted        bool
}

func newIncreasePureAggrState(interval time.Duration, stalenessInterval time.Duration) *increasePureAggrState {
    return &increasePureAggrState{
        intervalSecs:  roundDurationToSecs(interval),
        stalenessSecs: roundDurationToSecs(stalenessInterval),
    }
}

func (as *increasePureAggrState) pushSample(inputKey, outputKey string, value float64) {
    currentTime := fasttime.UnixTimestamp()
    deleteDeadline := currentTime + as.stalenessSecs

again:
    v, ok := as.m.Load(outputKey)
    if !ok {
        // The entry is missing in the map. Try creating it.
        v = &increasePureStateValue{
            lastValues: make(map[string]*lastValueState),
        }
        vNew, loaded := as.m.LoadOrStore(outputKey, v)
        if loaded {
            // Use the entry created by a concurrent goroutine.
            v = vNew
        }
    }
    sv := v.(*increasePureStateValue)
    sv.mu.Lock()
    deleted := sv.deleted
    if !deleted {
        lv, ok := sv.lastValues[inputKey]
        if !ok {
            lv = &lastValueState{}
            sv.lastValues[inputKey] = lv
        }
        d := value
        if ok && lv.value <= value {
            d = value - lv.value
        }
        sv.total += d
        lv.value = value
        lv.deleteDeadline = deleteDeadline
        sv.deleteDeadline = deleteDeadline
        sv.samplesCount++
    }
    sv.mu.Unlock()
    if deleted {
        // The entry has been deleted by the concurrent call to appendSeriesForFlush.
        // Try obtaining and updating the entry again.
        goto again
    }
}

func (as *increasePureAggrState) removeOldEntries(currentTime uint64) {
    m := &as.m
    m.Range(func(k, v interface{}) bool {
        sv := v.(*increasePureStateValue)
        sv.mu.Lock()
        deleted := currentTime > sv.deleteDeadline
        if deleted {
            // Mark the current entry as deleted
            sv.deleted = deleted
        } else {
            // Delete outdated entries in sv.lastValues
            m := sv.lastValues
            for k1, v1 := range m {
                if currentTime > v1.deleteDeadline {
                    delete(m, k1)
                }
            }
        }
        sv.mu.Unlock()
        if deleted {
            m.Delete(k)
        }
        return true
    })
}

func (as *increasePureAggrState) appendSeriesForFlush(ctx *flushCtx) {
    currentTime := fasttime.UnixTimestamp()
    currentTimeMsec := int64(currentTime) * 1000

    as.removeOldEntries(currentTime)

    m := &as.m
    m.Range(func(k, v interface{}) bool {
        sv := v.(*increasePureStateValue)
        sv.mu.Lock()
        increasePure := sv.total
        sv.total = 0
        deleted := sv.deleted
        sv.mu.Unlock()
        if !deleted {
            key := k.(string)
            ctx.appendSeries(key, as.getOutputName(), currentTimeMsec, increasePure)
        }
        return true
    })
    as.lastPushTimestamp.Store(currentTime)
}

func (as *increasePureAggrState) getOutputName() string {
    return "increase_pure"
}

func (as *increasePureAggrState) getStateRepresentation(suffix string) aggrStateRepresentation {
    metrics := make([]aggrStateRepresentationMetric, 0)
    as.m.Range(func(k, v any) bool {
        value := v.(*increasePureStateValue)
        value.mu.Lock()
        defer value.mu.Unlock()
        if value.deleted {
            return true
        }
        metrics = append(metrics, aggrStateRepresentationMetric{
            metric:       getLabelsStringFromKey(k.(string), suffix, as.getOutputName()),
            currentValue: value.total,
            samplesCount: value.samplesCount,
        })
        return true
    })
    return aggrStateRepresentation{
        intervalSecs:      as.intervalSecs,
        lastPushTimestamp: as.lastPushTimestamp.Load(),
        metrics:           metrics,
    }
}
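The "pure" part lives in `pushSample`: a series seen for the first time contributes its full value (`d := value` with no subtraction), and a counter reset (`lv.value > value`) also contributes the full new value. `appendSeriesForFlush` then zeroes `sv.total`, so each flush emits the increase accumulated since the previous flush. A single-goroutine sketch of that delta logic (hypothetical type, no staleness handling):

```go
package main

import "fmt"

// pureIncrease mirrors the delta logic of increasePureAggrState.pushSample:
// the first observed value counts fully, later samples contribute
// value-lastValue while the counter grows, and the full new value after a
// counter reset.
type pureIncrease struct {
    lastValues map[string]float64
    total      float64
}

func (p *pureIncrease) pushSample(inputKey string, value float64) {
    last, seen := p.lastValues[inputKey]
    d := value
    if seen && last <= value {
        d = value - last
    }
    p.total += d
    p.lastValues[inputKey] = value
}

// flush returns the accumulated increase and resets it, as
// increasePureAggrState.appendSeriesForFlush does with sv.total.
func (p *pureIncrease) flush() float64 {
    t := p.total
    p.total = 0
    return t
}

func main() {
    p := &pureIncrease{lastValues: map[string]float64{}}
    p.pushSample("foo", 100) // first value counts fully
    p.pushSample("foo", 187) // +87
    fmt.Println(p.flush())   // 187
    p.pushSample("foo", 9) // reset detected: counts 9 fully
    fmt.Println(p.flush()) // 9
}
```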


@@ -24,14 +24,10 @@ import (
var supportedOutputs = []string{
    "total",
    "total_pure",
    "increase",
    "increase_pure",
    "count_series",
    "count_samples",
    "count_samples_total",
    "sum_samples",
    "sum_samples_total",
    "last",
    "min",
    "max",

@@ -354,22 +350,14 @@ func newAggregator(cfg *Config, pushFunc PushFunc, dedupInterval time.Duration)
    switch output {
    case "total":
        aggrStates[i] = newTotalAggrState(interval, stalenessInterval)
    case "total_pure":
        aggrStates[i] = newTotalPureAggrState(interval, stalenessInterval)
    case "increase":
        aggrStates[i] = newIncreaseAggrState(interval, stalenessInterval)
    case "increase_pure":
        aggrStates[i] = newIncreasePureAggrState(interval, stalenessInterval)
    case "count_series":
        aggrStates[i] = newCountSeriesAggrState(interval)
    case "count_samples":
        aggrStates[i] = newCountSamplesAggrState(interval)
    case "count_samples_total":
        aggrStates[i] = newCountSamplesTotalAggrState(interval, stalenessInterval)
    case "sum_samples":
        aggrStates[i] = newSumSamplesAggrState(interval)
    case "sum_samples_total":
        aggrStates[i] = newSumSamplesTotalAggrState(interval, stalenessInterval)
    case "last":
        aggrStates[i] = newLastAggrState(interval)
    case "min":

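The removed `case` branches above are what connected the deleted states to the config parser; the remaining outputs are constructed the same way. A sketch of how a config reaches this switch, modeled on the test file below (`NewAggregatorsFromData`, `Push` and `MustStop` are functions from this package; the config content is illustrative):

```go
// Fragment, as it could appear in a test within this package.
config := []byte(`
- interval: 1m
  outputs: [total, increase, count_samples, sum_samples]
`)
pushFunc := func(tss []prompbmarshal.TimeSeries) {
    // In vmagent this callback forwards the aggregated series onward.
    fmt.Printf("flushed %d aggregated series\n", len(tss))
}
a, err := NewAggregatorsFromData(config, pushFunc, 0)
if err != nil {
    panic(err)
}
defer a.MustStop()
a.Push(mustParsePromMetrics("foo 100\n"), nil)
```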

@@ -2,7 +2,6 @@ package streamaggr
import (
    "fmt"
    "reflect"
    "sort"
    "strconv"
    "strings"

@@ -815,301 +814,3 @@ func mustParsePromMetrics(s string) []prompbmarshal.TimeSeries {
    }
    return tss
}
func TestTotalOutput(t *testing.T) {
    saConfig := `
- interval: 100ms
  outputs:
  - total
  - increase
  - newtotal
  - newincrease
`
    type sample struct {
        metric    string
        value     float64
        timestamp int64
    }
    f := func(config string, inputMetrics, outputMetricsExpected []sample, matchIdxsStrExpected string) {
        t.Helper()

        // Initialize Aggregators
        var tssOutput []prompbmarshal.TimeSeries
        var tssOutputLock sync.Mutex
        pushFunc := func(tss []prompbmarshal.TimeSeries) {
            tssOutputLock.Lock()
            for _, ts := range tss {
                labelsCopy := append([]prompbmarshal.Label{}, ts.Labels...)
                samplesCopy := append([]prompbmarshal.Sample{}, ts.Samples...)
                tssOutput = append(tssOutput, prompbmarshal.TimeSeries{
                    Labels:  labelsCopy,
                    Samples: samplesCopy,
                })
            }
            tssOutputLock.Unlock()
        }
        a, err := NewAggregatorsFromData([]byte(config), pushFunc, 0)
        if err != nil {
            t.Fatalf("cannot initialize aggregators: %s", err)
        }

        // Push the inputMetrics to Aggregators
        matchIdxs := make([]byte, 0)
        var prevTs int64 = 0
        for _, m := range inputMetrics {
            if (m.timestamp - prevTs) > 0 {
                <-time.After(time.Duration(m.timestamp-prevTs) * time.Microsecond)
            }
            inputMetricsStr := fmt.Sprintf("%s %v\n", m.metric, m.value)
            tssInput := mustParsePromMetrics(inputMetricsStr)
            matchIdxs = append(matchIdxs, a.Push(tssInput, nil)...)
            prevTs = m.timestamp
        }
        a.MustStop()

        // Verify matchIdxs equals matchIdxsStrExpected
        matchIdxsStr := ""
        for _, v := range matchIdxs {
            matchIdxsStr += strconv.Itoa(int(v))
        }
        if matchIdxsStr != matchIdxsStrExpected {
            t.Fatalf("unexpected matchIdxs;\ngot\n%s\nwant\n%s", matchIdxsStr, matchIdxsStrExpected)
        }

        // Verify the tssOutput contains the expected metrics
        outputMetrics := make([]sample, len(tssOutput))
        for i, ts := range tssOutput {
            outputMetrics[i] = sample{
                metric: promrelabel.LabelsToString(ts.Labels),
                value:  ts.Samples[0].Value,
            }
        }
        if !reflect.DeepEqual(outputMetrics, outputMetricsExpected) {
            t.Fatalf("unexpected output metrics;\ngot\n%+v\nwant\n%+v", outputMetrics, outputMetricsExpected)
        }
    }

    f(
        saConfig,
        []sample{
            {metric: "foo", value: 100, timestamp: 0},
        },
        []sample{
            {metric: "foo:100ms_total", value: 0},
            {metric: "foo:100ms_increase", value: 0},
            {metric: "foo:100ms_newtotal", value: 0},
            {metric: "foo:100ms_newincrease", value: 0},
        },
        "1",
    )
    f(
        saConfig,
        []sample{
            {metric: "foo", value: 100, timestamp: 0},
            {metric: "foo", value: 100, timestamp: 1},
        },
        []sample{
            {metric: "foo:100ms_total", value: 0},
            {metric: "foo:100ms_increase", value: 0},
            {metric: "foo:100ms_newtotal", value: 0},    // correct: 0
            {metric: "foo:100ms_newincrease", value: 0}, // correct: 0
        },
        "11",
    )
    f(
        saConfig,
        []sample{
            {metric: "foo", value: 100, timestamp: 0},
            {metric: "foo", value: 90, timestamp: 1},
        },
        []sample{
            {metric: "foo:100ms_total", value: 90},
            {metric: "foo:100ms_increase", value: 90},
            {metric: "foo:100ms_newtotal", value: 0},    // correct: 0
            {metric: "foo:100ms_newincrease", value: 0}, // correct: 0
        },
        "11",
    )
    f(
        saConfig,
        []sample{
            {metric: "foo", value: 100, timestamp: 0},
            {metric: "foo", value: 87, timestamp: 1},
        },
        []sample{
            {metric: "foo:100ms_total", value: 87},
            {metric: "foo:100ms_increase", value: 87},
            {metric: "foo:100ms_newtotal", value: 187},    // correct: 187
            {metric: "foo:100ms_newincrease", value: 187}, // correct: 187
        },
        "11",
    )
    f(
        saConfig,
        []sample{
            {metric: "foo", value: 100, timestamp: 0},
            {metric: "foo", value: 187, timestamp: 1},
        },
        []sample{
            {metric: "foo:100ms_total", value: 87},
            {metric: "foo:100ms_increase", value: 87},
            {metric: "foo:100ms_newtotal", value: 187},    // correct: 187
            {metric: "foo:100ms_newincrease", value: 187}, // correct: 187
        },
        "11",
    )
    f(
        saConfig,
        []sample{
            {metric: "foo", value: 100, timestamp: 0},
            {metric: "foo", value: 13, timestamp: 1},
        },
        []sample{
            {metric: "foo:100ms_total", value: 13},
            {metric: "foo:100ms_increase", value: 13},
            {metric: "foo:100ms_newtotal", value: 113},    // correct: 113
            {metric: "foo:100ms_newincrease", value: 113}, // correct: 113
        },
        "11",
    )
    f(
        saConfig,
        []sample{
            {metric: "foo", value: 100, timestamp: 0},
            {metric: "foo", value: 9, timestamp: 1},
        },
        []sample{
            {metric: "foo:100ms_total", value: 9},
            {metric: "foo:100ms_increase", value: 9},
            {metric: "foo:100ms_newtotal", value: 9},    // correct: 9
            {metric: "foo:100ms_newincrease", value: 9}, // correct: 9
        },
        "11",
    )
    f(
        saConfig,
        []sample{
            {metric: "foo", value: 100, timestamp: 0},
            {metric: "foo", value: 1, timestamp: 1},
        },
        []sample{
            {metric: "foo:100ms_total", value: 1},
            {metric: "foo:100ms_increase", value: 1},
            {metric: "foo:100ms_newtotal", value: 1},    // correct: 1
            {metric: "foo:100ms_newincrease", value: 1}, // correct: 1
        },
        "11",
    )
    f(
        saConfig,
        []sample{
            {metric: "foo", value: 100, timestamp: 0},
            {metric: "foo", value: 0, timestamp: 1},
        },
        []sample{
            {metric: "foo:100ms_total", value: 0},
            {metric: "foo:100ms_increase", value: 0},
            {metric: "foo:100ms_newtotal", value: 0},    // correct: 0
            {metric: "foo:100ms_newincrease", value: 0}, // correct: 0
        },
        "11",
    )
    f(
        saConfig,
        []sample{
            {metric: "foo", value: 100, timestamp: 0},
            {metric: "foo", value: 87, timestamp: 1},
            {metric: "foo", value: 200, timestamp: 2},
            {metric: "foo", value: 287, timestamp: 3},
        },
        []sample{
            {metric: "foo:100ms_total", value: 287},
            {metric: "foo:100ms_increase", value: 287},
            {metric: "foo:100ms_newtotal", value: 387},    // correct: 387
            {metric: "foo:100ms_newincrease", value: 387}, // correct: 387
        },
        "1111",
    )
    f(
        saConfig,
        []sample{
            {metric: "foo", value: 100, timestamp: 0},
            {metric: "foo", value: 187, timestamp: 1},
            {metric: "foo", value: 200, timestamp: 2},
            {metric: "foo", value: 287, timestamp: 3},
        },
        []sample{
            {metric: "foo:100ms_total", value: 187},
            {metric: "foo:100ms_increase", value: 187},
            {metric: "foo:100ms_newtotal", value: 287},    // correct: 287
            {metric: "foo:100ms_newincrease", value: 287}, // correct: 287
        },
        "1111",
    )
    f(
        saConfig,
        []sample{
            {metric: "foo", value: 100, timestamp: 0},
            {metric: "foo", value: 87, timestamp: 1},
            {metric: "foo", value: 200, timestamp: 2},
            {metric: "foo", value: 87, timestamp: 3},
        },
        []sample{
            {metric: "foo:100ms_total", value: 287},
            {metric: "foo:100ms_increase", value: 287},
            {metric: "foo:100ms_newtotal", value: 387},    // correct: 387
            {metric: "foo:100ms_newincrease", value: 387}, // correct: 387
        },
        "1111",
    )
    f(
        saConfig,
        []sample{
            {metric: "foo", value: 100, timestamp: 0},
            {metric: "foo", value: 87, timestamp: 1},
            {metric: "foo", value: 200, timestamp: 2},
            {metric: "foo", value: 187, timestamp: 3},
        },
        []sample{
            {metric: "foo:100ms_total", value: 387},
            {metric: "foo:100ms_increase", value: 387},
            {metric: "foo:100ms_newtotal", value: 300},    // correct: 300
            {metric: "foo:100ms_newincrease", value: 300}, // correct: 300
        },
        "1111",
    )
    f(
        saConfig,
        []sample{
            {metric: "foo", value: 100, timestamp: 0},
            {metric: "foo", value: 87, timestamp: 1},
            {metric: "foo", value: 200, timestamp: 2},
            {metric: "foo", value: 177, timestamp: 3},
        },
        []sample{
            {metric: "foo:100ms_total", value: 377},
            {metric: "foo:100ms_increase", value: 377},
            {metric: "foo:100ms_newtotal", value: 300},    // correct: 300
            {metric: "foo:100ms_newincrease", value: 300}, // correct: 300
        },
        "1111",
    )
}
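The `matchIdxsStrExpected` arguments above ("1", "11", "1111") encode one digit per pushed input series: `a.Push` returns a byte slice with 1 for every series matched by some aggregation rule, and the helper concatenates those bytes into a string. A short fragment showing the same interpretation outside the test, reusing the test's `a` and `tssInput`:

```go
// Fragment: inspecting which input series were matched by aggregation rules.
matchIdxs := a.Push(tssInput, nil)
for i, m := range matchIdxs {
    if m == 1 {
        fmt.Printf("input series #%d matched an aggregation rule\n", i)
    }
}
```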


@@ -1,138 +0,0 @@
package streamaggr

import (
    "math"
    "sync"
    "sync/atomic"
    "time"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
)

// sumSamplesTotalAggrState calculates output=sum_samples_total, i.e. the cumulative sum over input samples.
type sumSamplesTotalAggrState struct {
    m                 sync.Map
    intervalSecs      uint64
    stalenessSecs     uint64
    lastPushTimestamp atomic.Uint64
}

type sumSamplesTotalStateValue struct {
    mu             sync.Mutex
    sum            float64
    samplesCount   uint64
    deleted        bool
    deleteDeadline uint64
}

func newSumSamplesTotalAggrState(interval time.Duration, stalenessInterval time.Duration) *sumSamplesTotalAggrState {
    return &sumSamplesTotalAggrState{
        intervalSecs:  roundDurationToSecs(interval),
        stalenessSecs: roundDurationToSecs(stalenessInterval),
    }
}

func (as *sumSamplesTotalAggrState) pushSample(_, outputKey string, value float64) {
    currentTime := fasttime.UnixTimestamp()
    deleteDeadline := currentTime + as.stalenessSecs

again:
    v, ok := as.m.Load(outputKey)
    if !ok {
        // The entry is missing in the map. Try creating it.
        v = &sumSamplesTotalStateValue{
            sum: value,
        }
        vNew, loaded := as.m.LoadOrStore(outputKey, v)
        if !loaded {
            // The new entry has been successfully created.
            return
        }
        // Use the entry created by a concurrent goroutine.
        v = vNew
    }
    sv := v.(*sumSamplesTotalStateValue)
    sv.mu.Lock()
    deleted := sv.deleted
    if !deleted {
        sv.sum += value
        sv.samplesCount++
        sv.deleteDeadline = deleteDeadline
    }
    sv.mu.Unlock()
    if deleted {
        // The entry has been deleted by the concurrent call to appendSeriesForFlush.
        // Try obtaining and updating the entry again.
        goto again
    }
}

func (as *sumSamplesTotalAggrState) removeOldEntries(currentTime uint64) {
    m := &as.m
    m.Range(func(k, v interface{}) bool {
        sv := v.(*sumSamplesTotalStateValue)
        sv.mu.Lock()
        deleted := currentTime > sv.deleteDeadline
        if deleted {
            // Mark the current entry as deleted
            sv.deleted = deleted
        }
        sv.mu.Unlock()
        if deleted {
            m.Delete(k)
        }
        return true
    })
}

func (as *sumSamplesTotalAggrState) appendSeriesForFlush(ctx *flushCtx) {
    currentTime := fasttime.UnixTimestamp()
    currentTimeMsec := int64(currentTime) * 1000

    as.removeOldEntries(currentTime)

    m := &as.m
    m.Range(func(k, v interface{}) bool {
        sv := v.(*sumSamplesTotalStateValue)
        sv.mu.Lock()
        sum := sv.sum
        if math.Abs(sv.sum) >= (1 << 53) {
            // It is time to reset the entry, since it starts losing float64 precision
            sv.sum = 0
        }
        sv.mu.Unlock()
        key := k.(string)
        ctx.appendSeries(key, as.getOutputName(), currentTimeMsec, sum)
        return true
    })
    as.lastPushTimestamp.Store(currentTime)
}

func (as *sumSamplesTotalAggrState) getOutputName() string {
    return "sum_samples_total"
}

func (as *sumSamplesTotalAggrState) getStateRepresentation(suffix string) aggrStateRepresentation {
    metrics := make([]aggrStateRepresentationMetric, 0)
    as.m.Range(func(k, v any) bool {
        value := v.(*sumSamplesTotalStateValue)
        value.mu.Lock()
        defer value.mu.Unlock()
        if value.deleted {
            return true
        }
        metrics = append(metrics, aggrStateRepresentationMetric{
            metric:       getLabelsStringFromKey(k.(string), suffix, as.getOutputName()),
            currentValue: value.sum,
            samplesCount: value.samplesCount,
        })
        return true
    })
    return aggrStateRepresentation{
        intervalSecs:      as.intervalSecs,
        lastPushTimestamp: as.lastPushTimestamp.Load(),
        metrics:           metrics,
    }
}
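The reset threshold `1 << 53` in `appendSeriesForFlush` is the edge of the range where float64 can represent every integer exactly (53 significand bits); past it, small increments silently disappear. A self-contained demonstration:

```go
package main

import "fmt"

func main() {
    x := float64(1 << 53) // 9007199254740992
    fmt.Println(x+1 == x) // true: the +1 is lost to rounding
    fmt.Println(x+2 == x) // false: 2^53+2 is still representable
}
```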


@@ -1,159 +0,0 @@
package streamaggr

import (
    "math"
    "sync"
    "sync/atomic"
    "time"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
)

// totalPureAggrState calculates output=total_pure, i.e. the running summary counter over input counters.
type totalPureAggrState struct {
    m                 sync.Map
    intervalSecs      uint64
    stalenessSecs     uint64
    lastPushTimestamp atomic.Uint64
}

type totalPureStateValue struct {
    mu             sync.Mutex
    lastValues     map[string]*lastValueState
    total          float64
    samplesCount   uint64
    deleteDeadline uint64
    deleted        bool
}

func newTotalPureAggrState(interval time.Duration, stalenessInterval time.Duration) *totalPureAggrState {
    return &totalPureAggrState{
        intervalSecs:  roundDurationToSecs(interval),
        stalenessSecs: roundDurationToSecs(stalenessInterval),
    }
}

func (as *totalPureAggrState) pushSample(inputKey, outputKey string, value float64) {
    currentTime := fasttime.UnixTimestamp()
    deleteDeadline := currentTime + as.stalenessSecs

again:
    v, ok := as.m.Load(outputKey)
    if !ok {
        // The entry is missing in the map. Try creating it.
        v = &totalPureStateValue{
            lastValues: make(map[string]*lastValueState),
        }
        vNew, loaded := as.m.LoadOrStore(outputKey, v)
        if loaded {
            // Use the entry created by a concurrent goroutine.
            v = vNew
        }
    }
    sv := v.(*totalPureStateValue)
    sv.mu.Lock()
    deleted := sv.deleted
    if !deleted {
        lv, ok := sv.lastValues[inputKey]
        if !ok {
            lv = &lastValueState{}
            sv.lastValues[inputKey] = lv
        }
        d := value
        if ok && lv.value <= value {
            d = value - lv.value
        }
        sv.total += d
        lv.value = value
        lv.deleteDeadline = deleteDeadline
        sv.deleteDeadline = deleteDeadline
        sv.samplesCount++
    }
    sv.mu.Unlock()
    if deleted {
        // The entry has been deleted by the concurrent call to appendSeriesForFlush.
        // Try obtaining and updating the entry again.
        goto again
    }
}

func (as *totalPureAggrState) removeOldEntries(currentTime uint64) {
    m := &as.m
    m.Range(func(k, v interface{}) bool {
        sv := v.(*totalPureStateValue)
        sv.mu.Lock()
        deleted := currentTime > sv.deleteDeadline
        if deleted {
            // Mark the current entry as deleted
            sv.deleted = deleted
        } else {
            // Delete outdated entries in sv.lastValues
            m := sv.lastValues
            for k1, v1 := range m {
                if currentTime > v1.deleteDeadline {
                    delete(m, k1)
                }
            }
        }
        sv.mu.Unlock()
        if deleted {
            m.Delete(k)
        }
        return true
    })
}

func (as *totalPureAggrState) appendSeriesForFlush(ctx *flushCtx) {
    currentTime := fasttime.UnixTimestamp()
    currentTimeMsec := int64(currentTime) * 1000

    as.removeOldEntries(currentTime)

    m := &as.m
    m.Range(func(k, v interface{}) bool {
        sv := v.(*totalPureStateValue)
        sv.mu.Lock()
        totalPure := sv.total
        if math.Abs(sv.total) >= (1 << 53) {
            // It is time to reset the entry, since it starts losing float64 precision
            sv.total = 0
        }
        deleted := sv.deleted
        sv.mu.Unlock()
        if !deleted {
            key := k.(string)
            ctx.appendSeries(key, as.getOutputName(), currentTimeMsec, totalPure)
        }
        return true
    })
    as.lastPushTimestamp.Store(currentTime)
}

func (as *totalPureAggrState) getOutputName() string {
    return "total_pure"
}

func (as *totalPureAggrState) getStateRepresentation(suffix string) aggrStateRepresentation {
    metrics := make([]aggrStateRepresentationMetric, 0)
    as.m.Range(func(k, v any) bool {
        value := v.(*totalPureStateValue)
        value.mu.Lock()
        defer value.mu.Unlock()
        if value.deleted {
            return true
        }
        metrics = append(metrics, aggrStateRepresentationMetric{
            metric:       getLabelsStringFromKey(k.(string), suffix, as.getOutputName()),
            currentValue: value.total,
            samplesCount: value.samplesCount,
        })
        return true
    })
    return aggrStateRepresentation{
        intervalSecs:      as.intervalSecs,
        lastPushTimestamp: as.lastPushTimestamp.Load(),
        metrics:           metrics,
    }
}
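`totalPureAggrState` shares its per-sample delta logic with `increasePureAggrState` above; the behavioral difference is confined to the flush. `increase_pure` zeroes `sv.total` on every flush and so emits per-interval deltas, while `total_pure` keeps accumulating and only resets once `math.Abs(sv.total)` reaches `1 << 53`, to avoid float64 precision loss. A minimal sketch contrasting the two flush strategies (hypothetical helpers, single-goroutine):

```go
package main

import (
    "fmt"
    "math"
)

// flushIncreasePure emits the accumulated delta and resets it, like
// increasePureAggrState.appendSeriesForFlush.
func flushIncreasePure(total *float64) float64 {
    t := *total
    *total = 0
    return t
}

// flushTotalPure emits the running total and keeps it, resetting only when
// float64 precision would start degrading, like totalPureAggrState.
func flushTotalPure(total *float64) float64 {
    t := *total
    if math.Abs(*total) >= (1 << 53) {
        *total = 0
    }
    return t
}

func main() {
    inc, tot := 100.0, 100.0
    fmt.Println(flushIncreasePure(&inc), flushTotalPure(&tot)) // 100 100
    inc += 50
    tot += 50
    fmt.Println(flushIncreasePure(&inc), flushTotalPure(&tot)) // 50 150
}
```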