app/vmselect/promql: do not strip __name__ from time series after binary comparison operations

Example: `foo > 10` now leaves the `foo` name on all the matching time series from the left-hand side.
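To make the new behavior concrete, here is a minimal standalone Go sketch (not taken from this commit; the `keepsMetricName` helper is hypothetical, and the operator list assumes `isBinaryOpCmp` covers the standard comparison operators) showing which binary operators keep `__name__` after this change:

package main

import "fmt"

// keepsMetricName is a hypothetical helper mirroring the decision made by the
// new resetMetricGroupIfRequired in the diff below: it reports whether the
// result of a binary operation keeps the __name__ (MetricGroup) of its input.
func keepsMetricName(op string, boolModifier bool) bool {
	switch op {
	case "==", "!=", ">", "<", ">=", "<=":
		// Comparisons keep __name__ unless the `bool` modifier is used.
		return !boolModifier
	case "default", "if", "ifnot":
		// These operators never reset __name__.
		return true
	default:
		// Arithmetic and other operators still drop __name__, as before.
		return false
	}
}

func main() {
	fmt.Println(keepsMetricName(">", false)) // true:  `foo > 10` keeps the `foo` name
	fmt.Println(keepsMetricName(">", true))  // false: `foo > bool 10` drops it
	fmt.Println(keepsMetricName("+", false)) // false: `foo + 10` drops it, as before
}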
parent e88a03323a
commit 47a3228108

2 changed files with 86 additions and 5 deletions
@@ -271,7 +271,7 @@ func adjustBinaryOpTags(be *binaryOpExpr, left, right []*timeseries) ([]*timeser
 		rvsLeft := make([]*timeseries, len(right))
 		tsLeft := left[0]
 		for i, tsRight := range right {
-			tsRight.MetricName.ResetMetricGroup()
+			resetMetricGroupIfRequired(be, tsRight)
 			rvsLeft[i] = tsLeft
 		}
 		return rvsLeft, right, right, nil
@@ -281,7 +281,7 @@ func adjustBinaryOpTags(be *binaryOpExpr, left, right []*timeseries) ([]*timeser
 		rvsRight := make([]*timeseries, len(left))
 		tsRight := right[0]
 		for i, tsLeft := range left {
-			tsLeft.MetricName.ResetMetricGroup()
+			resetMetricGroupIfRequired(be, tsLeft)
 			rvsRight[i] = tsRight
 		}
 		return left, rvsRight, left, nil
@@ -340,7 +340,7 @@ func adjustBinaryOpTags(be *binaryOpExpr, left, right []*timeseries) ([]*timeser
 		if err := ensureOneX("right", tssRight); err != nil {
 			return nil, nil, nil, err
 		}
-		tssLeft[0].MetricName.ResetMetricGroup()
+		resetMetricGroupIfRequired(be, tssLeft[0])
 		rvsLeft = append(rvsLeft, tssLeft[0])
 		rvsRight = append(rvsRight, tssRight[0])
 	default:
@@ -354,6 +354,19 @@ func adjustBinaryOpTags(be *binaryOpExpr, left, right []*timeseries) ([]*timeser
 	return rvsLeft, rvsRight, dst, nil
 }
 
+func resetMetricGroupIfRequired(be *binaryOpExpr, ts *timeseries) {
+	if isBinaryOpCmp(be.Op) && !be.Bool {
+		// Do not reset MetricGroup for non-boolean `compare` binary ops like Prometheus does.
+		return
+	}
+	switch be.Op {
+	case "default", "if", "ifnot":
+		// Do not reset MetricGroup for these ops.
+		return
+	}
+	ts.MetricName.ResetMetricGroup()
+}
+
 func binaryOpPlus(left, right float64) float64 {
 	return left + right
 }
@@ -1450,6 +1450,62 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r1, r2}
 		f(q, resultExpected)
 	})
+	t.Run(`a cmp scalar (leave MetricGroup)`, func(t *testing.T) {
+		t.Parallel()
+		q := `sort_desc((
+			label_set(time(), "__name__", "foo", "a", "x"),
+			label_set(time()+200, "__name__", "bar", "a", "x"),
+		) > 1300)`
+		r1 := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{nan, 1400, 1600, 1800, 2000, 2200},
+			Timestamps: timestampsExpected,
+		}
+		r1.MetricName.MetricGroup = []byte("bar")
+		r1.MetricName.Tags = []storage.Tag{{
+			Key:   []byte("a"),
+			Value: []byte("x"),
+		}}
+		r2 := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{nan, nan, 1400, 1600, 1800, 2000},
+			Timestamps: timestampsExpected,
+		}
+		r2.MetricName.MetricGroup = []byte("foo")
+		r2.MetricName.Tags = []storage.Tag{{
+			Key:   []byte("a"),
+			Value: []byte("x"),
+		}}
+		resultExpected := []netstorage.Result{r1, r2}
+		f(q, resultExpected)
+	})
+	t.Run(`a cmp bool scalar (drop MetricGroup)`, func(t *testing.T) {
+		t.Parallel()
+		q := `sort_desc((
+			label_set(time(), "__name__", "foo", "a", "x"),
+			label_set(time()+200, "__name__", "bar", "a", "y"),
+		) >= bool 1200)`
+		r1 := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{1, 1, 1, 1, 1, 1},
+			Timestamps: timestampsExpected,
+		}
+		r1.MetricName.Tags = []storage.Tag{{
+			Key:   []byte("a"),
+			Value: []byte("y"),
+		}}
+		r2 := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{0, 1, 1, 1, 1, 1},
+			Timestamps: timestampsExpected,
+		}
+		r2.MetricName.Tags = []storage.Tag{{
+			Key:   []byte("a"),
+			Value: []byte("x"),
+		}}
+		resultExpected := []netstorage.Result{r1, r2}
+		f(q, resultExpected)
+	})
 	t.Run(`1 > 2`, func(t *testing.T) {
 		t.Parallel()
 		q := `1 > 2`
@@ -1552,13 +1608,14 @@ func TestExecSuccess(t *testing.T) {
 	t.Run(`vector default scalar`, func(t *testing.T) {
 		t.Parallel()
 		q := `sort_desc(union(
-			label_set(time() > 1400, "foo", "bar"),
-			label_set(time() < 1700, "foo", "baz")) default 123)`
+			label_set(time() > 1400, "__name__", "x", "foo", "bar"),
+			label_set(time() < 1700, "__name__", "y", "foo", "baz")) default 123)`
 		r1 := netstorage.Result{
 			MetricName: metricNameExpected,
 			Values:     []float64{123, 123, 123, 1600, 1800, 2000},
 			Timestamps: timestampsExpected,
 		}
+		r1.MetricName.MetricGroup = []byte("x")
 		r1.MetricName.Tags = []storage.Tag{{
 			Key:   []byte("foo"),
 			Value: []byte("bar"),
@@ -1568,6 +1625,7 @@ func TestExecSuccess(t *testing.T) {
 			Values:     []float64{1000, 1200, 1400, 1600, 123, 123},
 			Timestamps: timestampsExpected,
 		}
+		r2.MetricName.MetricGroup = []byte("y")
 		r2.MetricName.Tags = []storage.Tag{{
 			Key:   []byte("foo"),
 			Value: []byte("baz"),
@@ -3620,6 +3678,16 @@ func TestExecError(t *testing.T) {
 	f(`(label_set(1, "foo", "bar", "a", "b"), label_set(1, "foo", "bar", "a", "c")) + on(foo) group_right() label_set(1, "foo", "bar")`)
 	f(`1 + on() (label_set(1, "foo", bar"), label_set(2, "foo", "baz"))`)
 
+	// duplicate metrics after binary op
+	f(`(
+		label_set(time(), "__name__", "foo", "a", "x"),
+		label_set(time()+200, "__name__", "bar", "a", "x"),
+	) > bool 1300`)
+	f(`(
+		label_set(time(), "__name__", "foo", "a", "x"),
+		label_set(time()+200, "__name__", "bar", "a", "x"),
+	) + 10`)
+
 	// With expressions
 	f(`ttf()`)
 	f(`ttf(1, 2)`)