2024-03-04 22:45:22 +00:00
|
|
|
package streamaggr
|
|
|
|
|
|
|
|
import (
|
2024-07-01 12:56:17 +00:00
|
|
|
"fmt"
|
2024-03-05 00:13:21 +00:00
|
|
|
"slices"
|
2024-03-04 22:45:22 +00:00
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
2024-03-05 00:13:21 +00:00
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
2024-03-04 22:45:22 +00:00
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
|
2024-09-24 20:03:04 +00:00
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
|
2024-03-04 22:45:22 +00:00
|
|
|
"github.com/VictoriaMetrics/metrics"
|
|
|
|
)
|
|
|
|
|
|
|
|
// Deduplicator deduplicates samples per each time series.
type Deduplicator struct {
	// da holds the per-series deduplication state.
	da *dedupAggr

	// stateSize is the number of dedup state shards tracked simultaneously;
	// it also sizes the per-push sample shards in deduplicatorPushCtx.
	stateSize int

	// dropLabels contains label names, which are dropped from samples before deduplication.
	dropLabels []string

	// dedupInterval is the deduplication interval in milliseconds.
	dedupInterval int64

	// wg is used for waiting until the background flusher goroutine stops.
	wg sync.WaitGroup

	// stopCh is closed by MustStop in order to stop the background flusher.
	stopCh chan struct{}

	// ms contains metrics exposed by this Deduplicator; it is registered globally
	// on creation and unregistered in MustStop.
	ms *metrics.Set

	// dedupFlushDuration tracks durations of dedup flushes.
	dedupFlushDuration *metrics.Histogram

	// dedupFlushTimeouts counts flushes, which took longer than the configured dedupInterval.
	dedupFlushTimeouts *metrics.Counter
}
|
|
|
|
|
|
|
|
// NewDeduplicator returns new deduplicator, which deduplicates samples per each time series.
|
|
|
|
//
|
|
|
|
// The de-duplicated samples are passed to pushFunc once per dedupInterval.
|
|
|
|
//
|
2024-03-05 00:13:21 +00:00
|
|
|
// An optional dropLabels list may contain label names, which must be dropped before de-duplicating samples.
|
|
|
|
// Common case is to drop `replica`-like labels from samples received from HA datasources.
|
|
|
|
//
|
app/vmagent/remotewrite: follow-up for f153f54d11250da050aa93bc4fa9b7ba9e144691
- Move the remaining code responsible for stream aggregation initialization from remotewrite.go to streamaggr.go .
This improves code maintainability a bit.
- Properly shut down streamaggr.Aggregators initialized inside remotewrite.CheckStreamAggrConfigs().
This prevents from potential resource leaks.
- Use separate functions for initializing and reloading of global stream aggregation and per-remoteWrite.url stream aggregation.
This makes the code easier to read and maintain. This also fixes INFO and ERROR logs emitted by these functions.
- Add an ability to specify `name` option in every stream aggregation config. This option is used as `name` label
in metrics exposed by stream aggregation at /metrics page. This simplifies investigation of the exposed metrics.
- Add `path` label additionally to `name`, `url` and `position` labels at metrics exposed by streaming aggregation.
This label should simplify investigation of the exposed metrics.
- Remove `match` and `group` labels from metrics exposed by streaming aggregation, since they have little practical applicability:
it is hard to use these labels in query filters and aggregation functions.
- Rename the metric `vm_streamaggr_flushed_samples_total` to less misleading `vm_streamaggr_output_samples_total` .
This metric shows the number of samples generated by the corresponding streaming aggregation rule.
This metric has been added in the commit 861852f2624895e01f93ce196607c72616ce2a94 .
See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6462
- Remove the metric `vm_streamaggr_stale_samples_total`, since it is unclear how it can be used in practice.
This metric has been added in the commit 861852f2624895e01f93ce196607c72616ce2a94 .
See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6462
- Remove Alias and aggrID fields from streamaggr.Options struct, since these fields aren't related to optional params,
which could modify the behaviour of the constructed streaming aggregator.
Convert the Alias field to regular argument passed to LoadFromFile() function, since this argument is mandatory.
- Pass Options arg to LoadFromFile() function by reference, since this structure is quite big.
This also allows passing nil instead of Options when default options are enough.
- Add `name`, `path`, `url` and `position` labels to `vm_streamaggr_dedup_state_size_bytes` and `vm_streamaggr_dedup_state_items_count` metrics,
so they have consistent set of labels comparing to the rest of streaming aggregation metrics.
- Convert aggregator.aggrStates field type from `map[string]aggrState` to `[]aggrOutput`, where `aggrOutput` contains the corresponding
`aggrState` plus all the related metrics (currently only `vm_streamaggr_output_samples_total` metric is exposed with the corresponding
`output` label per each configured output function). This simplifies and speeds up the code responsible for updating per-output
metrics. This is a follow-up for the commit 2eb1bc4f814037ae87ac6556011ae0d3caee6bc8 .
See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6604
- Added missing urls to docs ( https://docs.victoriametrics.com/stream-aggregation/ ) in error messages. These urls help users
figuring out why VictoriaMetrics or vmagent generates the corresponding error messages. The urls were removed for unknown reason
in the commit 2eb1bc4f814037ae87ac6556011ae0d3caee6bc8 .
- Fix incorrect update for `vm_streamaggr_output_samples_total` metric in flushCtx.appendSeriesWithExtraLabel() function.
While at it, reduce memory usage by limiting the maximum number of samples per flush to 10K.
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5467
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6268
2024-07-15 16:01:37 +00:00
|
|
|
// alias is url label used in metrics exposed by the returned Deduplicator.
|
|
|
|
//
|
2024-03-04 22:45:22 +00:00
|
|
|
// MustStop must be called on the returned deduplicator in order to free up occupied resources.
|
2024-09-24 20:03:04 +00:00
|
|
|
func NewDeduplicator(pushFunc PushFunc, stateSize int, dedupInterval time.Duration, dropLabels []string, alias string) *Deduplicator {
|
2024-03-04 22:45:22 +00:00
|
|
|
d := &Deduplicator{
|
2024-09-24 20:03:04 +00:00
|
|
|
da: newDedupAggr(stateSize),
|
2024-07-03 10:42:45 +00:00
|
|
|
dropLabels: dropLabels,
|
|
|
|
dedupInterval: dedupInterval.Milliseconds(),
|
2024-09-24 20:03:04 +00:00
|
|
|
stateSize: stateSize,
|
2024-03-05 00:13:21 +00:00
|
|
|
|
2024-03-04 22:45:22 +00:00
|
|
|
stopCh: make(chan struct{}),
|
|
|
|
ms: metrics.NewSet(),
|
|
|
|
}
|
|
|
|
|
|
|
|
ms := d.ms
|
2024-07-01 12:56:17 +00:00
|
|
|
|
app/vmagent/remotewrite: follow-up for f153f54d11250da050aa93bc4fa9b7ba9e144691
- Move the remaining code responsible for stream aggregation initialization from remotewrite.go to streamaggr.go .
This improves code maintainability a bit.
- Properly shut down streamaggr.Aggregators initialized inside remotewrite.CheckStreamAggrConfigs().
This prevents from potential resource leaks.
- Use separate functions for initializing and reloading of global stream aggregation and per-remoteWrite.url stream aggregation.
This makes the code easier to read and maintain. This also fixes INFO and ERROR logs emitted by these functions.
- Add an ability to specify `name` option in every stream aggregation config. This option is used as `name` label
in metrics exposed by stream aggregation at /metrics page. This simplifies investigation of the exposed metrics.
- Add `path` label additionally to `name`, `url` and `position` labels at metrics exposed by streaming aggregation.
This label should simplify investigation of the exposed metrics.
- Remove `match` and `group` labels from metrics exposed by streaming aggregation, since they have little practical applicability:
it is hard to use these labels in query filters and aggregation functions.
- Rename the metric `vm_streamaggr_flushed_samples_total` to less misleading `vm_streamaggr_output_samples_total` .
This metric shows the number of samples generated by the corresponding streaming aggregation rule.
This metric has been added in the commit 861852f2624895e01f93ce196607c72616ce2a94 .
See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6462
- Remove the metric `vm_streamaggr_stale_samples_total`, since it is unclear how it can be used in practice.
This metric has been added in the commit 861852f2624895e01f93ce196607c72616ce2a94 .
See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6462
- Remove Alias and aggrID fields from streamaggr.Options struct, since these fields aren't related to optional params,
which could modify the behaviour of the constructed streaming aggregator.
Convert the Alias field to regular argument passed to LoadFromFile() function, since this argument is mandatory.
- Pass Options arg to LoadFromFile() function by reference, since this structure is quite big.
This also allows passing nil instead of Options when default options are enough.
- Add `name`, `path`, `url` and `position` labels to `vm_streamaggr_dedup_state_size_bytes` and `vm_streamaggr_dedup_state_items_count` metrics,
so they have consistent set of labels comparing to the rest of streaming aggregation metrics.
- Convert aggregator.aggrStates field type from `map[string]aggrState` to `[]aggrOutput`, where `aggrOutput` contains the corresponding
`aggrState` plus all the related metrics (currently only `vm_streamaggr_output_samples_total` metric is exposed with the corresponding
`output` label per each configured output function). This simplifies and speeds up the code responsible for updating per-output
metrics. This is a follow-up for the commit 2eb1bc4f814037ae87ac6556011ae0d3caee6bc8 .
See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6604
- Added missing urls to docs ( https://docs.victoriametrics.com/stream-aggregation/ ) in error messages. These urls help users
figuring out why VictoriaMetrics or vmagent generates the corresponding error messages. The urls were removed for unknown reason
in the commit 2eb1bc4f814037ae87ac6556011ae0d3caee6bc8 .
- Fix incorrect update for `vm_streamaggr_output_samples_total` metric in flushCtx.appendSeriesWithExtraLabel() function.
While at it, reduce memory usage by limiting the maximum number of samples per flush to 10K.
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5467
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6268
2024-07-15 16:01:37 +00:00
|
|
|
metricLabels := fmt.Sprintf(`name="dedup",url=%q`, alias)
|
|
|
|
|
2024-07-01 12:56:17 +00:00
|
|
|
_ = ms.NewGauge(fmt.Sprintf(`vm_streamaggr_dedup_state_size_bytes{%s}`, metricLabels), func() float64 {
|
2024-03-04 22:45:22 +00:00
|
|
|
return float64(d.da.sizeBytes())
|
|
|
|
})
|
2024-07-01 12:56:17 +00:00
|
|
|
_ = ms.NewGauge(fmt.Sprintf(`vm_streamaggr_dedup_state_items_count{%s}`, metricLabels), func() float64 {
|
2024-03-04 22:45:22 +00:00
|
|
|
return float64(d.da.itemsCount())
|
|
|
|
})
|
|
|
|
|
app/vmagent/remotewrite: follow-up for f153f54d11250da050aa93bc4fa9b7ba9e144691
- Move the remaining code responsible for stream aggregation initialization from remotewrite.go to streamaggr.go .
This improves code maintainability a bit.
- Properly shut down streamaggr.Aggregators initialized inside remotewrite.CheckStreamAggrConfigs().
This prevents from potential resource leaks.
- Use separate functions for initializing and reloading of global stream aggregation and per-remoteWrite.url stream aggregation.
This makes the code easier to read and maintain. This also fixes INFO and ERROR logs emitted by these functions.
- Add an ability to specify `name` option in every stream aggregation config. This option is used as `name` label
in metrics exposed by stream aggregation at /metrics page. This simplifies investigation of the exposed metrics.
- Add `path` label additionally to `name`, `url` and `position` labels at metrics exposed by streaming aggregation.
This label should simplify investigation of the exposed metrics.
- Remove `match` and `group` labels from metrics exposed by streaming aggregation, since they have little practical applicability:
it is hard to use these labels in query filters and aggregation functions.
- Rename the metric `vm_streamaggr_flushed_samples_total` to less misleading `vm_streamaggr_output_samples_total` .
This metric shows the number of samples generated by the corresponding streaming aggregation rule.
This metric has been added in the commit 861852f2624895e01f93ce196607c72616ce2a94 .
See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6462
- Remove the metric `vm_streamaggr_stale_samples_total`, since it is unclear how it can be used in practice.
This metric has been added in the commit 861852f2624895e01f93ce196607c72616ce2a94 .
See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6462
- Remove Alias and aggrID fields from streamaggr.Options struct, since these fields aren't related to optional params,
which could modify the behaviour of the constructed streaming aggregator.
Convert the Alias field to regular argument passed to LoadFromFile() function, since this argument is mandatory.
- Pass Options arg to LoadFromFile() function by reference, since this structure is quite big.
This also allows passing nil instead of Options when default options are enough.
- Add `name`, `path`, `url` and `position` labels to `vm_streamaggr_dedup_state_size_bytes` and `vm_streamaggr_dedup_state_items_count` metrics,
so they have consistent set of labels comparing to the rest of streaming aggregation metrics.
- Convert aggregator.aggrStates field type from `map[string]aggrState` to `[]aggrOutput`, where `aggrOutput` contains the corresponding
`aggrState` plus all the related metrics (currently only `vm_streamaggr_output_samples_total` metric is exposed with the corresponding
`output` label per each configured output function). This simplifies and speeds up the code responsible for updating per-output
metrics. This is a follow-up for the commit 2eb1bc4f814037ae87ac6556011ae0d3caee6bc8 .
See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6604
- Added missing urls to docs ( https://docs.victoriametrics.com/stream-aggregation/ ) in error messages. These urls help users
figuring out why VictoriaMetrics or vmagent generates the corresponding error messages. The urls were removed for unknown reason
in the commit 2eb1bc4f814037ae87ac6556011ae0d3caee6bc8 .
- Fix incorrect update for `vm_streamaggr_output_samples_total` metric in flushCtx.appendSeriesWithExtraLabel() function.
While at it, reduce memory usage by limiting the maximum number of samples per flush to 10K.
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5467
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6268
2024-07-15 16:01:37 +00:00
|
|
|
d.dedupFlushDuration = ms.NewHistogram(fmt.Sprintf(`vm_streamaggr_dedup_flush_duration_seconds{%s}`, metricLabels))
|
|
|
|
d.dedupFlushTimeouts = ms.NewCounter(fmt.Sprintf(`vm_streamaggr_dedup_flush_timeouts_total{%s}`, metricLabels))
|
2024-03-05 00:13:21 +00:00
|
|
|
|
2024-03-04 22:45:22 +00:00
|
|
|
metrics.RegisterSet(ms)
|
|
|
|
|
|
|
|
d.wg.Add(1)
|
|
|
|
go func() {
|
|
|
|
defer d.wg.Done()
|
|
|
|
d.runFlusher(pushFunc, dedupInterval)
|
|
|
|
}()
|
|
|
|
|
|
|
|
return d
|
|
|
|
}
|
|
|
|
|
|
|
|
// MustStop stops d.
|
|
|
|
func (d *Deduplicator) MustStop() {
|
2024-07-15 08:39:05 +00:00
|
|
|
metrics.UnregisterSet(d.ms, true)
|
2024-03-04 22:45:22 +00:00
|
|
|
d.ms = nil
|
|
|
|
|
|
|
|
close(d.stopCh)
|
|
|
|
d.wg.Wait()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Push pushes tss to d.
func (d *Deduplicator) Push(tss []prompbmarshal.TimeSeries) {
	// Obtain a reusable push context sized for d.stateSize sample shards.
	ctx := getDeduplicatorPushCtx(d.stateSize)
	pss := ctx.pss
	labels := &ctx.labels
	buf := ctx.buf

	dropLabels := d.dropLabels
	aggrIntervals := int64(d.stateSize)
	for _, ts := range tss {
		// Build the label set for the series, optionally dropping the configured labels.
		if len(dropLabels) > 0 {
			labels.Labels = dropSeriesLabels(labels.Labels[:0], ts.Labels, dropLabels)
		} else {
			labels.Labels = append(labels.Labels[:0], ts.Labels...)
		}
		if len(labels.Labels) == 0 {
			// Nothing left to identify the series — skip it.
			continue
		}
		// Sort labels so equal label sets compress to identical keys.
		labels.Sort()

		// Compress the sorted labels into buf; key aliases the freshly appended
		// bytes, so buf must not be reused before the samples are pushed below.
		bufLen := len(buf)
		buf = lc.Compress(buf, labels.Labels)
		key := bytesutil.ToUnsafeString(buf[bufLen:])
		for _, s := range ts.Samples {
			// Route each sample to the dedup state shard for its flush interval.
			flushIntervals := s.Timestamp/d.dedupInterval + 1
			idx := int(flushIntervals % aggrIntervals)
			pss[idx] = append(pss[idx], pushSample{
				key:       key,
				value:     s.Value,
				timestamp: s.Timestamp,
			})
		}
	}

	// Push the collected samples shard by shard, reusing a single data holder.
	data := &pushCtxData{}
	for idx, ps := range pss {
		data.idx = idx
		data.samples = ps
		d.da.pushSamples(data)
	}

	// Return the (possibly grown) buffers to the context before pooling it.
	ctx.pss = pss
	ctx.buf = buf
	putDeduplicatorPushCtx(ctx)
}
|
|
|
|
|
2024-03-05 00:13:21 +00:00
|
|
|
func dropSeriesLabels(dst, src []prompbmarshal.Label, labelNames []string) []prompbmarshal.Label {
|
|
|
|
for _, label := range src {
|
|
|
|
if !slices.Contains(labelNames, label.Name) {
|
|
|
|
dst = append(dst, label)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return dst
|
|
|
|
}
|
|
|
|
|
2024-03-04 22:45:22 +00:00
|
|
|
func (d *Deduplicator) runFlusher(pushFunc PushFunc, dedupInterval time.Duration) {
|
|
|
|
t := time.NewTicker(dedupInterval)
|
|
|
|
defer t.Stop()
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-d.stopCh:
|
|
|
|
return
|
2024-07-03 10:42:45 +00:00
|
|
|
case t := <-t.C:
|
|
|
|
flushTime := t.Truncate(dedupInterval).Add(dedupInterval)
|
|
|
|
flushTimestamp := flushTime.UnixMilli()
|
|
|
|
flushIntervals := int(flushTimestamp / int64(dedupInterval/time.Millisecond))
|
2024-09-24 20:03:04 +00:00
|
|
|
flushIdx := flushIntervals % d.stateSize
|
|
|
|
d.flush(pushFunc, dedupInterval, flushTimestamp, flushIdx)
|
2024-03-04 22:45:22 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-09-24 20:03:04 +00:00
|
|
|
// flush pushes the deduplicated samples from the state shard at idx to pushFunc,
// stamping them with flushTimestamp, and updates the flush-duration metrics.
func (d *Deduplicator) flush(pushFunc PushFunc, dedupInterval time.Duration, flushTimestamp int64, idx int) {
	startTime := time.Now()
	d.da.flush(func(data *pushCtxData) {
		// Reuse pooled buffers for building the time series to push.
		ctx := getDeduplicatorFlushCtx()

		tss := ctx.tss
		labels := ctx.labels
		samples := ctx.samples
		for _, ps := range data.samples {
			// Decompress the series labels appended after labelsLen;
			// the resulting sub-slice is referenced by the time series below.
			labelsLen := len(labels)
			labels = decompressLabels(labels, ps.key)

			samplesLen := len(samples)
			samples = append(samples, prompbmarshal.Sample{
				Value:     ps.value,
				Timestamp: ps.timestamp,
			})

			// Labels and Samples alias the tails of the shared backing buffers.
			tss = append(tss, prompbmarshal.TimeSeries{
				Labels:  labels[labelsLen:],
				Samples: samples[samplesLen:],
			})
		}
		pushFunc(tss)

		// Return the (possibly grown) buffers to the context before pooling it.
		ctx.tss = tss
		ctx.labels = labels
		ctx.samples = samples
		putDeduplicatorFlushCtx(ctx)
	}, flushTimestamp, idx, idx)

	duration := time.Since(startTime)
	d.dedupFlushDuration.Update(duration.Seconds())
	if duration > dedupInterval {
		// The flush couldn't keep up with the configured interval — surface it.
		d.dedupFlushTimeouts.Inc()
		logger.Warnf("deduplication couldn't be finished in the configured dedupInterval=%s; it took %.03fs; "+
			"possible solutions: increase dedupInterval; reduce samples' ingestion rate", dedupInterval, duration.Seconds())
	}
}
|
|
|
|
|
|
|
|
// deduplicatorPushCtx is a reusable context for Deduplicator.Push calls.
type deduplicatorPushCtx struct {
	// pss holds pushed samples sharded by dedup state index — one slice per state.
	pss [][]pushSample
	// labels is a scratch buffer for the per-series label set.
	labels promutils.Labels
	// buf is a scratch buffer holding compressed label keys.
	buf []byte
}
|
|
|
|
|
|
|
|
func (ctx *deduplicatorPushCtx) reset() {
|
2024-07-03 10:42:45 +00:00
|
|
|
for i, sc := range ctx.pss {
|
|
|
|
ctx.pss[i] = sc[:0]
|
|
|
|
}
|
2024-03-04 22:45:22 +00:00
|
|
|
|
2024-03-05 00:13:21 +00:00
|
|
|
ctx.labels.Reset()
|
|
|
|
|
2024-03-04 22:45:22 +00:00
|
|
|
ctx.buf = ctx.buf[:0]
|
|
|
|
}
|
|
|
|
|
2024-09-24 20:03:04 +00:00
|
|
|
func getDeduplicatorPushCtx(stateSize int) *deduplicatorPushCtx {
|
2024-03-04 22:45:22 +00:00
|
|
|
v := deduplicatorPushCtxPool.Get()
|
|
|
|
if v == nil {
|
2024-09-24 20:03:04 +00:00
|
|
|
return &deduplicatorPushCtx{
|
|
|
|
pss: make([][]pushSample, stateSize),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
ctx := v.(*deduplicatorPushCtx)
|
|
|
|
if len(ctx.pss) < stateSize {
|
|
|
|
ctx.pss = slicesutil.SetLength(ctx.pss, stateSize)
|
2024-03-04 22:45:22 +00:00
|
|
|
}
|
2024-09-24 20:03:04 +00:00
|
|
|
return ctx
|
2024-03-04 22:45:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// putDeduplicatorPushCtx resets ctx and returns it to the pool for future reuse.
func putDeduplicatorPushCtx(ctx *deduplicatorPushCtx) {
	ctx.reset()
	deduplicatorPushCtxPool.Put(ctx)
}
|
|
|
|
|
|
|
|
// deduplicatorPushCtxPool is a pool of deduplicatorPushCtx objects for reducing memory allocations.
var deduplicatorPushCtxPool sync.Pool
|
|
|
|
|
|
|
|
// deduplicatorFlushCtx is a reusable context for Deduplicator flush callbacks.
type deduplicatorFlushCtx struct {
	// tss accumulates the time series to be pushed.
	tss []prompbmarshal.TimeSeries
	// labels is a shared backing buffer for labels referenced by tss entries.
	labels []prompbmarshal.Label
	// samples is a shared backing buffer for samples referenced by tss entries.
	samples []prompbmarshal.Sample
}
|
|
|
|
|
|
|
|
func (ctx *deduplicatorFlushCtx) reset() {
|
|
|
|
clear(ctx.tss)
|
|
|
|
ctx.tss = ctx.tss[:0]
|
|
|
|
|
|
|
|
clear(ctx.labels)
|
|
|
|
ctx.labels = ctx.labels[:0]
|
|
|
|
|
|
|
|
clear(ctx.samples)
|
|
|
|
ctx.samples = ctx.samples[:0]
|
|
|
|
}
|
|
|
|
|
|
|
|
func getDeduplicatorFlushCtx() *deduplicatorFlushCtx {
|
|
|
|
v := deduplicatorFlushCtxPool.Get()
|
|
|
|
if v == nil {
|
|
|
|
return &deduplicatorFlushCtx{}
|
|
|
|
}
|
|
|
|
return v.(*deduplicatorFlushCtx)
|
|
|
|
}
|
|
|
|
|
|
|
|
// putDeduplicatorFlushCtx resets ctx and returns it to the pool for future reuse.
func putDeduplicatorFlushCtx(ctx *deduplicatorFlushCtx) {
	ctx.reset()
	deduplicatorFlushCtxPool.Put(ctx)
}
|
|
|
|
|
|
|
|
// deduplicatorFlushCtxPool is a pool of deduplicatorFlushCtx objects for reducing memory allocations.
var deduplicatorFlushCtxPool sync.Pool
|