2023-01-04 06:19:18 +00:00
|
|
|
package streamaggr
|
|
|
|
|
|
|
|
import (
|
lib/streamaggr: huge pile of changes
- Reduce memory usage by up to 5x when de-duplicating samples across big number of time series.
- Reduce memory usage by up to 5x when aggregating across big number of output time series.
- Add lib/promutils.LabelsCompressor, which is going to be used by other VictoriaMetrics components
for reducing memory usage for marshaled []prompbmarshal.Label.
- Add `dedup_interval` option at aggregation config, which allows setting individual
deduplication intervals per each aggregation.
- Add `keep_metric_names` option at aggregation config, which allows keeping the original
metric names in the output samples.
- Add `unique_samples` output, which counts the number of unique sample values.
- Add `increase_prometheus` and `total_prometheus` outputs, which ignore the first sample
per each newly encountered time series.
- Use 64-bit hashes instead of marshaled labels as map keys when calculating `count_series` output.
This makes obsolete https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5579
- Expose various metrics, which may help debugging stream aggregation:
- vm_streamaggr_dedup_state_size_bytes - the size of data structures responsible for deduplication
- vm_streamaggr_dedup_state_items_count - the number of items in the deduplication data structures
- vm_streamaggr_labels_compressor_size_bytes - the size of labels compressor data structures
- vm_streamaggr_labels_compressor_items_count - the number of entries in the labels compressor
- vm_streamaggr_flush_duration_seconds - a histogram, which shows the duration of stream aggregation flushes
- vm_streamaggr_dedup_flush_duration_seconds - a histogram, which shows the duration of deduplication flushes
- vm_streamaggr_flush_timeouts_total - counter for timed out stream aggregation flushes,
which took longer than the configured interval
- vm_streamaggr_dedup_flush_timeouts_total - counter for timed out deduplication flushes,
which took longer than the configured dedup_interval
- Actualize docs/stream-aggregation.md
The memory usage reduction increases CPU usage during stream aggregation by up to 30%.
This commit is based on https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5850
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5898
2024-03-02 00:42:26 +00:00
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
|
|
|
"github.com/cespare/xxhash/v2"
|
2023-01-04 06:19:18 +00:00
|
|
|
)
|
|
|
|
|
2024-09-24 20:03:04 +00:00
|
|
|
func countSeriesInitFn(values []aggrValue) []aggrValue {
|
|
|
|
for i := range values {
|
|
|
|
values[i] = &countSeriesAggrValue{
|
|
|
|
samples: make(map[uint64]struct{}),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return values
|
2023-01-04 06:19:18 +00:00
|
|
|
}
|
|
|
|
|
2024-09-24 20:03:04 +00:00
|
|
|
// countSeriesAggrValue calculates the `count_series` output -
// the number of unique time series observed on the current aggregation interval.
type countSeriesAggrValue struct {
	// samples holds 64-bit xxhash sums of the observed series keys;
	// len(samples) is the number of unique series seen so far.
	// Used as a set: values are zero-width struct{}.
	samples map[uint64]struct{}
}
|
|
|
|
|
2024-09-24 20:03:04 +00:00
|
|
|
func (av *countSeriesAggrValue) pushSample(ctx *pushSampleCtx) {
|
|
|
|
// Count unique hashes over the inputKeys instead of unique inputKey values.
|
|
|
|
// This reduces memory usage at the cost of possible hash collisions for distinct inputKey values.
|
|
|
|
h := xxhash.Sum64(bytesutil.ToUnsafeBytes(ctx.inputKey))
|
|
|
|
if _, ok := av.samples[h]; !ok {
|
|
|
|
av.samples[h] = struct{}{}
|
2023-01-04 06:19:18 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-09-24 20:03:04 +00:00
|
|
|
func (av *countSeriesAggrValue) flush(ctx *flushCtx, key string) {
|
|
|
|
ctx.appendSeries(key, "count_series", float64(len(av.samples)))
|
|
|
|
clear(av.samples)
|
2023-01-04 06:19:18 +00:00
|
|
|
}
|