VictoriaMetrics/lib/promutils/labelscompressor_test.go
Hui Wang bf3d9ba57b
stream aggregation: fix possible duplicated aggregation results (#7118)
When ingesting samples with the same labels (duplicated samples, or
samples that share the same labels after the `by` or `without` options
are applied), concurrent pushes could register different entries for the
same labelset in LabelsCompressor.
For example, both index 99 and index 100 could be assigned to the label
`foo=1` by two concurrent pushes. Because the encoded keys then contain
different label indexes, the same samples appear as distinct series in
aggrState, which produces duplicated results once the label indexes are
decompressed.

fbde238cdc/lib/streamaggr/streamaggr.go (L933)
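
To make the failure mode concrete, here is a minimal, hypothetical sketch of
the racy check-then-store pattern, assuming a simplified compressor with a
single `labelToIdx` map and an atomic index counter (the `racyCompressor` type
and its fields are illustrative only, not the real `LabelsCompressor` code):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// racyCompressor is a hypothetical, simplified model used only to illustrate
// the race; it is not the actual LabelsCompressor implementation.
type racyCompressor struct {
	labelToIdx sync.Map // label string -> uint64 index
	nextIdx    atomic.Uint64
}

// compress performs a non-atomic check-then-store: two concurrent callers can
// both miss on Load and then register different indexes for the same label.
func (rc *racyCompressor) compress(label string) uint64 {
	if v, ok := rc.labelToIdx.Load(label); ok {
		return v.(uint64)
	}
	idx := rc.nextIdx.Add(1)
	rc.labelToIdx.Store(label, idx) // the later Store silently overwrites the earlier one
	return idx
}

func main() {
	var rc racyCompressor
	var wg sync.WaitGroup
	idxs := make([]uint64, 2)
	for i := range idxs {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			idxs[i] = rc.compress("foo=1")
		}(i)
	}
	wg.Wait()
	// With unlucky scheduling both goroutines miss on Load and the same label
	// ends up with two different indexes (e.g. 99 and 100 in the description
	// above), so the keys they encode for the same series differ.
	fmt.Println(idxs[0], idxs[1])
}
```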

In this pull request, `idxToLabel` has to be stored first so that an idx
can already be looked up once its entry becomes visible in `lc.labelToIdx`.
As a result, `lc.idxToLabel` may still end up with a duplicated entry such
as [100]="foo=1". But given the low likelihood of this issue and the small
size overhead in idxToLabel, it should be fine.
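
Continuing the same simplified model, a sketch of the fixed ordering (again
hypothetical; the real `LabelsCompressor` compresses whole label sets and
differs in detail): `idxToLabel` is written before the label becomes visible
in `labelToIdx`, and `LoadOrStore` makes the index assignment atomic, so a
losing goroutine reuses the winner's index and at worst leaves behind the
rarely-hit duplicate entry in `idxToLabel` mentioned above.

```go
// fixedCompressor is the same hypothetical model with the corrected ordering.
type fixedCompressor struct {
	labelToIdx sync.Map // label string -> uint64 index
	idxToLabel sync.Map // uint64 index -> label string
	nextIdx    atomic.Uint64
}

func (fc *fixedCompressor) compress(label string) uint64 {
	if v, ok := fc.labelToIdx.Load(label); ok {
		return v.(uint64)
	}
	idx := fc.nextIdx.Add(1)
	// Store idxToLabel first, so the index can already be resolved by the time
	// it becomes visible to other goroutines via labelToIdx below.
	fc.idxToLabel.Store(idx, label)
	if prev, loaded := fc.labelToIdx.LoadOrStore(label, idx); loaded {
		// Another goroutine registered this label first: reuse its index.
		// The idxToLabel entry written above (e.g. [100]="foo=1") stays behind
		// as a rarely-hit, harmless duplicate.
		return prev.(uint64)
	}
	return idx
}
```
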
2024-09-30 14:30:34 +02:00

125 lines
2.7 KiB
Go

package promutils

import (
	"fmt"
	"sync"
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)

func TestLabelsCompressorSerial(t *testing.T) {
	var lc LabelsCompressor

	f := func(labels []prompbmarshal.Label) {
		t.Helper()
		sExpected := labelsToString(labels)
		data := lc.Compress(nil, labels)
		labelsResult := lc.Decompress(nil, data)
		sResult := labelsToString(labelsResult)
		if sExpected != sResult {
			t.Fatalf("unexpected result; got %s; want %s", sResult, sExpected)
		}
		if len(labels) > 0 {
			if n := lc.SizeBytes(); n == 0 {
				t.Fatalf("Unexpected zero SizeBytes()")
			}
			if n := lc.ItemsCount(); n == 0 {
				t.Fatalf("Unexpected zero ItemsCount()")
			}
		}
	}

	// empty labels
	f(nil)
	f([]prompbmarshal.Label{})

	// non-empty labels
	f([]prompbmarshal.Label{
		{
			Name:  "instance",
			Value: "12345.4342.342.3",
		},
		{
			Name:  "job",
			Value: "kube-pod-12323",
		},
	})
	f([]prompbmarshal.Label{
		{
			Name:  "instance",
			Value: "12345.4342.342.3",
		},
		{
			Name:  "job",
			Value: "kube-pod-12323",
		},
		{
			Name:  "pod",
			Value: "foo-bar-baz",
		},
	})
}

func TestLabelsCompressorConcurrent(t *testing.T) {
	const concurrency = 5
	var lc LabelsCompressor
	var expectCompressedKeys sync.Map
	var wg sync.WaitGroup
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			series := newTestSeries(100, 20)
			for n, labels := range series {
				sExpected := labelsToString(labels)
				data := lc.Compress(nil, labels)
				// Every goroutine compresses the same generated series; all of them
				// must obtain identical compressed keys for a given series.
				if expectData, ok := expectCompressedKeys.LoadOrStore(n, data); ok {
					if string(data) != string(expectData.([]byte)) {
						panic(fmt.Errorf("unexpected compress result at series/%d in iteration %d ", n, i))
					}
				}
				labelsResult := lc.Decompress(nil, data)
				sResult := labelsToString(labelsResult)
				if sExpected != sResult {
					panic(fmt.Errorf("unexpected result on iteration %d; got %s; want %s", i, sResult, sExpected))
				}
			}
		}()
	}
	wg.Wait()

	if n := lc.SizeBytes(); n == 0 {
		t.Fatalf("Unexpected zero SizeBytes()")
	}
	if n := lc.ItemsCount(); n == 0 {
		t.Fatalf("Unexpected zero ItemsCount()")
	}
}

func labelsToString(labels []prompbmarshal.Label) string {
	l := Labels{
		Labels: labels,
	}
	return l.String()
}

func newTestSeries(seriesCount, labelsPerSeries int) [][]prompbmarshal.Label {
	series := make([][]prompbmarshal.Label, seriesCount)
	for i := 0; i < seriesCount; i++ {
		labels := make([]prompbmarshal.Label, labelsPerSeries)
		for j := 0; j < labelsPerSeries; j++ {
			labels[j] = prompbmarshal.Label{
				Name:  fmt.Sprintf("label_%d", j),
				Value: fmt.Sprintf("value_%d_%d", i, j),
			}
		}
		series[i] = labels
	}
	return series
}