package main

import (
	"context"
	"errors"
	"reflect"
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)
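
// TestAlertingRule_ToTimeSeries verifies that active alerts are converted into the expected
// alertMetricName and alertForStateMetricName time series for instant and `for` rules.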
func TestAlertingRule_ToTimeSeries(t *testing.T) {
	timestamp := time.Now()
	testCases := []struct {
		rule  *AlertingRule
		alert *notifier.Alert
		expTS []prompbmarshal.TimeSeries
	}{
		{
			newTestAlertingRule("instant", 0),
			&notifier.Alert{State: notifier.StateFiring},
			[]prompbmarshal.TimeSeries{
				newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
					"__name__":      alertMetricName,
					alertStateLabel: notifier.StateFiring.String(),
				}),
			},
		},
		{
			newTestAlertingRule("instant extra labels", 0),
			&notifier.Alert{State: notifier.StateFiring, Labels: map[string]string{
				"job":      "foo",
				"instance": "bar",
			}},
			[]prompbmarshal.TimeSeries{
				newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
					"__name__":      alertMetricName,
					alertStateLabel: notifier.StateFiring.String(),
					"job":           "foo",
					"instance":      "bar",
				}),
			},
		},
		{
			newTestAlertingRule("instant labels override", 0),
			&notifier.Alert{State: notifier.StateFiring, Labels: map[string]string{
				alertStateLabel: "foo",
				"__name__":      "bar",
			}},
			[]prompbmarshal.TimeSeries{
				newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
					"__name__":      alertMetricName,
					alertStateLabel: notifier.StateFiring.String(),
				}),
			},
		},
		{
			newTestAlertingRule("for", time.Second),
			&notifier.Alert{State: notifier.StateFiring, ActiveAt: timestamp.Add(time.Second)},
			[]prompbmarshal.TimeSeries{
				newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
					"__name__":      alertMetricName,
					alertStateLabel: notifier.StateFiring.String(),
				}),
				newTimeSeries([]float64{float64(timestamp.Add(time.Second).Unix())},
					[]int64{timestamp.UnixNano()},
					map[string]string{
						"__name__": alertForStateMetricName,
					}),
			},
		},
		{
			newTestAlertingRule("for pending", 10*time.Second),
			&notifier.Alert{State: notifier.StatePending, ActiveAt: timestamp.Add(time.Second)},
			[]prompbmarshal.TimeSeries{
				newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
					"__name__":      alertMetricName,
					alertStateLabel: notifier.StatePending.String(),
				}),
				newTimeSeries([]float64{float64(timestamp.Add(time.Second).Unix())},
					[]int64{timestamp.UnixNano()},
					map[string]string{
						"__name__": alertForStateMetricName,
					}),
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.rule.Name, func(t *testing.T) {
			tc.rule.alerts[tc.alert.ID] = tc.alert
			tss := tc.rule.toTimeSeries(timestamp.Unix())
			if err := compareTimeSeries(t, tc.expTS, tss); err != nil {
				t.Fatalf("timeseries mismatch: %s", err)
			}
		})
	}
}
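
// TestAlertingRule_Exec feeds the rule with a sequence of evaluation steps
// and checks the state of the tracked alerts after the last step.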
func TestAlertingRule_Exec(t *testing.T) {
	const defaultStep = 5 * time.Millisecond
	type testAlert struct {
		labels []string
		alert  *notifier.Alert
	}
	testCases := []struct {
		rule      *AlertingRule
		steps     [][]datasource.Metric
		expAlerts []testAlert
	}{
		{
			newTestAlertingRule("empty", 0),
			[][]datasource.Metric{},
			nil,
		},
		{
			newTestAlertingRule("empty labels", 0),
			[][]datasource.Metric{
				{datasource.Metric{Values: []float64{1}, Timestamps: []int64{1}}},
			},
			[]testAlert{
				{alert: &notifier.Alert{State: notifier.StateFiring}},
			},
		},
		{
			newTestAlertingRule("single-firing", 0),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
			},
		},
		{
			newTestAlertingRule("single-firing=>inactive", 0),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
			},
		},
		{
			newTestAlertingRule("single-firing=>inactive=>firing", 0),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{},
				{metricWithLabels(t, "name", "foo")},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
			},
		},
		{
			newTestAlertingRule("single-firing=>inactive=>firing=>inactive", 0),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{},
				{metricWithLabels(t, "name", "foo")},
				{},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
			},
		},
		{
			newTestAlertingRule("single-firing=>inactive=>firing=>inactive=>inactive", 0),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{},
				{metricWithLabels(t, "name", "foo")},
				{},
				{},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
			},
		},
		{
			newTestAlertingRule("single-firing=>inactive=>firing=>inactive=>empty=>firing", 0),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{},
				{metricWithLabels(t, "name", "foo")},
				{},
				{},
				{metricWithLabels(t, "name", "foo")},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
			},
		},
		{
			newTestAlertingRule("multiple-firing", 0),
			[][]datasource.Metric{
				{
					metricWithLabels(t, "name", "foo"),
					metricWithLabels(t, "name", "foo1"),
					metricWithLabels(t, "name", "foo2"),
				},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
				{labels: []string{"name", "foo1"}, alert: &notifier.Alert{State: notifier.StateFiring}},
				{labels: []string{"name", "foo2"}, alert: &notifier.Alert{State: notifier.StateFiring}},
			},
		},
		{
			newTestAlertingRule("multiple-steps-firing", 0),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{metricWithLabels(t, "name", "foo1")},
				{metricWithLabels(t, "name", "foo2")},
			},
			// 1: fire first alert
			// 2: fire second alert, set first inactive
			// 3: fire third alert, set second inactive
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
				{labels: []string{"name", "foo1"}, alert: &notifier.Alert{State: notifier.StateInactive}},
				{labels: []string{"name", "foo2"}, alert: &notifier.Alert{State: notifier.StateFiring}},
			},
		},
		{
			newTestAlertingRule("for-pending", time.Minute),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}},
			},
		},
		{
			newTestAlertingRule("for-fired", defaultStep),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{metricWithLabels(t, "name", "foo")},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
			},
		},
		{
			newTestAlertingRule("for-pending=>empty", time.Second),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{metricWithLabels(t, "name", "foo")},
				// empty step to reset and delete pending alerts
				{},
			},
			nil,
		},
		{
			newTestAlertingRule("for-pending=>firing=>inactive", defaultStep),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{metricWithLabels(t, "name", "foo")},
				// empty step to reset pending alerts
				{},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
			},
		},
		{
			newTestAlertingRule("for-pending=>firing=>inactive=>pending", defaultStep),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{metricWithLabels(t, "name", "foo")},
				// empty step to reset pending alerts
				{},
				{metricWithLabels(t, "name", "foo")},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}},
			},
		},
		{
			newTestAlertingRule("for-pending=>firing=>inactive=>pending=>firing", defaultStep),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{metricWithLabels(t, "name", "foo")},
				// empty step to reset pending alerts
				{},
				{metricWithLabels(t, "name", "foo")},
				{metricWithLabels(t, "name", "foo")},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
			},
		},
	}
	fakeGroup := Group{Name: "TestRule_Exec"}
	for _, tc := range testCases {
		t.Run(tc.rule.Name, func(t *testing.T) {
			fq := &fakeQuerier{}
			tc.rule.q = fq
			tc.rule.GroupID = fakeGroup.ID()
			for _, step := range tc.steps {
				fq.reset()
				fq.add(step...)
				if _, err := tc.rule.Exec(context.TODO(), time.Now(), 0); err != nil {
					t.Fatalf("unexpected err: %s", err)
				}
				// artificial delay between applying steps
				time.Sleep(defaultStep)
			}
			if len(tc.rule.alerts) != len(tc.expAlerts) {
				t.Fatalf("expected %d alerts; got %d", len(tc.expAlerts), len(tc.rule.alerts))
			}
			expAlerts := make(map[uint64]*notifier.Alert)
			for _, ta := range tc.expAlerts {
				labels := make(map[string]string)
				for i := 0; i < len(ta.labels); i += 2 {
					k, v := ta.labels[i], ta.labels[i+1]
					labels[k] = v
				}
				labels[alertNameLabel] = tc.rule.Name
				h := hash(labels)
				expAlerts[h] = ta.alert
			}
			for key, exp := range expAlerts {
				got, ok := tc.rule.alerts[key]
				if !ok {
					t.Fatalf("expected to have key %d", key)
				}
				if got.State != exp.State {
					t.Fatalf("expected state %d; got %d", exp.State, got.State)
				}
			}
		})
	}
}
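
// TestAlertingRule_ExecRange executes the rule over a batch of datapoints
// and compares the produced time series with the output of alertToTimeSeries.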
func TestAlertingRule_ExecRange(t *testing.T) {
	testCases := []struct {
		rule      *AlertingRule
		data      []datasource.Metric
		expAlerts []*notifier.Alert
	}{
		{
			newTestAlertingRule("empty", 0),
			[]datasource.Metric{},
			nil,
		},
		{
			newTestAlertingRule("empty labels", 0),
			[]datasource.Metric{
				{Values: []float64{1}, Timestamps: []int64{1}},
			},
			[]*notifier.Alert{
				{State: notifier.StateFiring},
			},
		},
		{
			newTestAlertingRule("single-firing", 0),
			[]datasource.Metric{
				metricWithLabels(t, "name", "foo"),
			},
			[]*notifier.Alert{
				{
					Labels: map[string]string{"name": "foo"},
					State:  notifier.StateFiring,
				},
			},
		},
		{
			newTestAlertingRule("single-firing-on-range", 0),
			[]datasource.Metric{
				{Values: []float64{1, 1, 1}, Timestamps: []int64{1e3, 2e3, 3e3}},
			},
			[]*notifier.Alert{
				{State: notifier.StateFiring},
				{State: notifier.StateFiring},
				{State: notifier.StateFiring},
			},
		},
		{
			newTestAlertingRule("for-pending", time.Second),
			[]datasource.Metric{
				{Values: []float64{1, 1, 1}, Timestamps: []int64{1, 3, 5}},
			},
			[]*notifier.Alert{
				{State: notifier.StatePending, ActiveAt: time.Unix(1, 0)},
				{State: notifier.StatePending, ActiveAt: time.Unix(3, 0)},
				{State: notifier.StatePending, ActiveAt: time.Unix(5, 0)},
			},
		},
		{
			newTestAlertingRule("for-firing", 3*time.Second),
			[]datasource.Metric{
				{Values: []float64{1, 1, 1}, Timestamps: []int64{1, 3, 5}},
			},
			[]*notifier.Alert{
				{State: notifier.StatePending, ActiveAt: time.Unix(1, 0)},
				{State: notifier.StatePending, ActiveAt: time.Unix(1, 0)},
				{State: notifier.StateFiring, ActiveAt: time.Unix(1, 0)},
			},
		},
		{
			newTestAlertingRule("for=>pending=>firing=>pending=>firing=>pending", time.Second),
			[]datasource.Metric{
				{Values: []float64{1, 1, 1, 1, 1}, Timestamps: []int64{1, 2, 5, 6, 20}},
			},
			[]*notifier.Alert{
				{State: notifier.StatePending, ActiveAt: time.Unix(1, 0)},
				{State: notifier.StateFiring, ActiveAt: time.Unix(1, 0)},
				{State: notifier.StatePending, ActiveAt: time.Unix(5, 0)},
				{State: notifier.StateFiring, ActiveAt: time.Unix(5, 0)},
				{State: notifier.StatePending, ActiveAt: time.Unix(20, 0)},
			},
		},
		{
			newTestAlertingRule("multi-series-for=>pending=>pending=>firing", 3*time.Second),
			[]datasource.Metric{
				{Values: []float64{1, 1, 1}, Timestamps: []int64{1, 3, 5}},
				{Values: []float64{1, 1}, Timestamps: []int64{1, 5},
					Labels: []datasource.Label{{Name: "foo", Value: "bar"}},
				},
			},
			[]*notifier.Alert{
				{State: notifier.StatePending, ActiveAt: time.Unix(1, 0)},
				{State: notifier.StatePending, ActiveAt: time.Unix(1, 0)},
				{State: notifier.StateFiring, ActiveAt: time.Unix(1, 0)},
				// expected alerts for the second series
				{State: notifier.StatePending, ActiveAt: time.Unix(1, 0),
					Labels: map[string]string{
						"foo": "bar",
					}},
				{State: notifier.StatePending, ActiveAt: time.Unix(5, 0),
					Labels: map[string]string{
						"foo": "bar",
					}},
			},
		},
		{
			newTestRuleWithLabels("multi-series-firing", "source", "vm"),
			[]datasource.Metric{
				{Values: []float64{1, 1}, Timestamps: []int64{1, 100}},
				{Values: []float64{1, 1}, Timestamps: []int64{1, 5},
					Labels: []datasource.Label{{Name: "foo", Value: "bar"}},
				},
			},
			[]*notifier.Alert{
				{State: notifier.StateFiring, Labels: map[string]string{
					"source": "vm",
				}},
				{State: notifier.StateFiring, Labels: map[string]string{
					"source": "vm",
				}},
				// expected alerts for the second series
				{State: notifier.StateFiring, Labels: map[string]string{
					"foo":    "bar",
					"source": "vm",
				}},
				{State: notifier.StateFiring, Labels: map[string]string{
					"foo":    "bar",
					"source": "vm",
				}},
			},
		},
	}
	fakeGroup := Group{Name: "TestRule_ExecRange"}
	for _, tc := range testCases {
		t.Run(tc.rule.Name, func(t *testing.T) {
			fq := &fakeQuerier{}
			tc.rule.q = fq
			tc.rule.GroupID = fakeGroup.ID()
			fq.add(tc.data...)
			gotTS, err := tc.rule.ExecRange(context.TODO(), time.Now(), time.Now())
			if err != nil {
				t.Fatalf("unexpected err: %s", err)
			}
			var expTS []prompbmarshal.TimeSeries
			var j int
			for _, series := range tc.data {
				for _, timestamp := range series.Timestamps {
					a := tc.expAlerts[j]
					if a.Labels == nil {
						a.Labels = make(map[string]string)
					}
					a.Labels[alertNameLabel] = tc.rule.Name
					expTS = append(expTS, tc.rule.alertToTimeSeries(a, timestamp)...)
					j++
				}
			}
			if len(gotTS) != len(expTS) {
				t.Fatalf("expected %d time series; got %d", len(expTS), len(gotTS))
			}
			for i := range expTS {
				got, exp := gotTS[i], expTS[i]
				if !reflect.DeepEqual(got, exp) {
					t.Fatalf("%d: expected \n%v but got \n%v", i, exp, got)
				}
			}
		})
	}
}
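
// TestAlertingRule_Restore verifies that pending alerts are restored
// from alertForStateMetricName series returned by the datasource.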
func TestAlertingRule_Restore(t *testing.T) {
	testCases := []struct {
		rule      *AlertingRule
		metrics   []datasource.Metric
		expAlerts map[uint64]*notifier.Alert
	}{
		{
			newTestRuleWithLabels("no extra labels"),
			[]datasource.Metric{
				metricWithValueAndLabels(t, float64(time.Now().Truncate(time.Hour).Unix()),
					"__name__", alertForStateMetricName,
				),
			},
			map[uint64]*notifier.Alert{
				hash(nil): {State: notifier.StatePending,
					ActiveAt: time.Now().Truncate(time.Hour)},
			},
		},
		{
			newTestRuleWithLabels("metric labels"),
			[]datasource.Metric{
				metricWithValueAndLabels(t, float64(time.Now().Truncate(time.Hour).Unix()),
					"__name__", alertForStateMetricName,
					alertNameLabel, "metric labels",
					alertGroupNameLabel, "groupID",
					"foo", "bar",
					"namespace", "baz",
				),
			},
			map[uint64]*notifier.Alert{
				hash(map[string]string{
					alertNameLabel:      "metric labels",
					alertGroupNameLabel: "groupID",
					"foo":               "bar",
					"namespace":         "baz",
				}): {State: notifier.StatePending,
					ActiveAt: time.Now().Truncate(time.Hour)},
			},
		},
		{
			newTestRuleWithLabels("rule labels", "source", "vm"),
			[]datasource.Metric{
				metricWithValueAndLabels(t, float64(time.Now().Truncate(time.Hour).Unix()),
					"__name__", alertForStateMetricName,
					"foo", "bar",
					"namespace", "baz",
					// extra labels set by rule
					"source", "vm",
				),
			},
			map[uint64]*notifier.Alert{
				hash(map[string]string{
					"foo":       "bar",
					"namespace": "baz",
					"source":    "vm",
				}): {State: notifier.StatePending,
					ActiveAt: time.Now().Truncate(time.Hour)},
			},
		},
		{
			newTestRuleWithLabels("multiple alerts"),
			[]datasource.Metric{
				metricWithValueAndLabels(t, float64(time.Now().Truncate(time.Hour).Unix()),
					"__name__", alertForStateMetricName,
					"host", "localhost-1",
				),
				metricWithValueAndLabels(t, float64(time.Now().Truncate(2*time.Hour).Unix()),
					"__name__", alertForStateMetricName,
					"host", "localhost-2",
				),
				metricWithValueAndLabels(t, float64(time.Now().Truncate(3*time.Hour).Unix()),
					"__name__", alertForStateMetricName,
					"host", "localhost-3",
				),
			},
			map[uint64]*notifier.Alert{
				hash(map[string]string{"host": "localhost-1"}): {State: notifier.StatePending,
					ActiveAt: time.Now().Truncate(time.Hour)},
				hash(map[string]string{"host": "localhost-2"}): {State: notifier.StatePending,
					ActiveAt: time.Now().Truncate(2 * time.Hour)},
				hash(map[string]string{"host": "localhost-3"}): {State: notifier.StatePending,
					ActiveAt: time.Now().Truncate(3 * time.Hour)},
			},
		},
	}
	fakeGroup := Group{Name: "TestRule_Exec"}
	for _, tc := range testCases {
		t.Run(tc.rule.Name, func(t *testing.T) {
			fq := &fakeQuerier{}
			tc.rule.GroupID = fakeGroup.ID()
			tc.rule.q = fq
			fq.add(tc.metrics...)
			if err := tc.rule.Restore(context.TODO(), fq, time.Hour, nil); err != nil {
				t.Fatalf("unexpected err: %s", err)
			}
			if len(tc.rule.alerts) != len(tc.expAlerts) {
				t.Fatalf("expected %d alerts; got %d", len(tc.expAlerts), len(tc.rule.alerts))
			}
			for key, exp := range tc.expAlerts {
				got, ok := tc.rule.alerts[key]
				if !ok {
					t.Fatalf("expected to have key %d", key)
				}
				if got.State != exp.State {
					t.Fatalf("expected state %d; got %d", exp.State, got.State)
				}
				if got.ActiveAt != exp.ActiveAt {
					t.Fatalf("expected ActiveAt %v; got %v", exp.ActiveAt, got.ActiveAt)
				}
			}
		})
	}
}
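
// TestAlertingRule_Exec_Negative covers error cases: duplicate time series
// produced by label collisions and errors returned by the datasource.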
func TestAlertingRule_Exec_Negative(t *testing.T) {
	fq := &fakeQuerier{}
	ar := newTestAlertingRule("test", 0)
	ar.Labels = map[string]string{"job": "test"}
	ar.q = fq

	// successful attempt
	fq.add(metricWithValueAndLabels(t, 1, "__name__", "foo", "job", "bar"))
	_, err := ar.Exec(context.TODO(), time.Now(), 0)
	if err != nil {
		t.Fatal(err)
	}

	// the `job` label collides with the rule's extra label and makes both time series identical
	fq.add(metricWithValueAndLabels(t, 1, "__name__", "foo", "job", "baz"))
	_, err = ar.Exec(context.TODO(), time.Now(), 0)
	if !errors.Is(err, errDuplicate) {
		t.Fatalf("expected to have %s error; got %s", errDuplicate, err)
	}

	fq.reset()

	expErr := "connection reset by peer"
	fq.setErr(errors.New(expErr))
	_, err = ar.Exec(context.TODO(), time.Now(), 0)
	if err == nil {
		t.Fatalf("expected to get err; got nil")
	}
	if !strings.Contains(err.Error(), expErr) {
		t.Fatalf("expected to get err %q; got %q instead", expErr, err)
	}
}
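
// TestAlertingRuleLimit verifies how the `limit` argument of Exec restricts
// the number of alerts a single evaluation may produce.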
func TestAlertingRuleLimit(t *testing.T) {
	fq := &fakeQuerier{}
	ar := newTestAlertingRule("test", 0)
	ar.Labels = map[string]string{"job": "test"}
	ar.q = fq
	ar.For = time.Minute
	testCases := []struct {
		limit  int
		err    string
		tssNum int
	}{
		{
			limit:  0,
			tssNum: 4,
		},
		{
			limit:  -1,
			tssNum: 4,
		},
		{
			limit:  1,
			err:    "exec exceeded limit of 1 with 2 alerts",
			tssNum: 0,
		},
		{
			limit:  4,
			tssNum: 4,
		},
	}
	var (
		err       error
		timestamp = time.Now()
	)
	fq.add(metricWithValueAndLabels(t, 1, "__name__", "foo", "job", "bar"))
	fq.add(metricWithValueAndLabels(t, 1, "__name__", "foo", "bar", "job"))
	for _, testCase := range testCases {
		_, err = ar.Exec(context.TODO(), timestamp, testCase.limit)
		if err != nil && !strings.EqualFold(err.Error(), testCase.err) {
			t.Fatal(err)
		}
	}
	fq.reset()
}
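
// TestAlertingRule_Template verifies that alert labels and annotations
// are templated from the metric labels as expected.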
func TestAlertingRule_Template(t *testing.T) {
	testCases := []struct {
		rule      *AlertingRule
		metrics   []datasource.Metric
		expAlerts map[uint64]*notifier.Alert
	}{
		{
			&AlertingRule{
				Name: "common",
				Labels: map[string]string{
					"region": "east",
				},
				Annotations: map[string]string{
					"summary": `{{ $labels.alertname }}: Too high connection number for "{{ $labels.instance }}"`,
				},
				alerts: make(map[uint64]*notifier.Alert),
			},
			[]datasource.Metric{
				metricWithValueAndLabels(t, 1, "instance", "foo"),
				metricWithValueAndLabels(t, 1, "instance", "bar"),
			},
			map[uint64]*notifier.Alert{
				hash(map[string]string{alertNameLabel: "common", "region": "east", "instance": "foo"}): {
					Annotations: map[string]string{
						"summary": `common: Too high connection number for "foo"`,
					},
					Labels: map[string]string{
						alertNameLabel: "common",
						"region":       "east",
						"instance":     "foo",
					},
				},
				hash(map[string]string{alertNameLabel: "common", "region": "east", "instance": "bar"}): {
					Annotations: map[string]string{
						"summary": `common: Too high connection number for "bar"`,
					},
					Labels: map[string]string{
						alertNameLabel: "common",
						"region":       "east",
						"instance":     "bar",
					},
				},
			},
		},
		{
			&AlertingRule{
				Name: "override label",
				Labels: map[string]string{
					"instance": "{{ $labels.instance }}",
				},
				Annotations: map[string]string{
					"summary":     `{{ $labels.__name__ }}: Too high connection number for "{{ $labels.instance }}"`,
					"description": `{{ $labels.alertname}}: It is {{ $value }} connections for "{{ $labels.instance }}"`,
				},
				alerts: make(map[uint64]*notifier.Alert),
			},
			[]datasource.Metric{
				metricWithValueAndLabels(t, 2, "__name__", "first", "instance", "foo", alertNameLabel, "override"),
				metricWithValueAndLabels(t, 10, "__name__", "second", "instance", "bar", alertNameLabel, "override"),
			},
			map[uint64]*notifier.Alert{
				hash(map[string]string{alertNameLabel: "override label", "instance": "foo"}): {
					Labels: map[string]string{
						alertNameLabel: "override label",
						"instance":     "foo",
					},
					Annotations: map[string]string{
						"summary":     `first: Too high connection number for "foo"`,
						"description": `override: It is 2 connections for "foo"`,
					},
				},
				hash(map[string]string{alertNameLabel: "override label", "instance": "bar"}): {
					Labels: map[string]string{
						alertNameLabel: "override label",
						"instance":     "bar",
					},
					Annotations: map[string]string{
						"summary":     `second: Too high connection number for "bar"`,
						"description": `override: It is 10 connections for "bar"`,
					},
				},
			},
		},
		{
			&AlertingRule{
				Name:      "OriginLabels",
				GroupName: "Testing",
				Labels: map[string]string{
					"instance": "{{ $labels.instance }}",
				},
				Annotations: map[string]string{
					"summary": `Alert "{{ $labels.alertname }}({{ $labels.alertgroup }})" for instance {{ $labels.instance }}`,
				},
				alerts: make(map[uint64]*notifier.Alert),
			},
			[]datasource.Metric{
				metricWithValueAndLabels(t, 1,
					alertNameLabel, "originAlertname",
					alertGroupNameLabel, "originGroupname",
					"instance", "foo"),
			},
			map[uint64]*notifier.Alert{
				hash(map[string]string{
					alertNameLabel:      "OriginLabels",
					alertGroupNameLabel: "Testing",
					"instance":          "foo"}): {
					Labels: map[string]string{
						alertNameLabel:      "OriginLabels",
						alertGroupNameLabel: "Testing",
						"instance":          "foo",
					},
					Annotations: map[string]string{
						"summary": `Alert "originAlertname(originGroupname)" for instance foo`,
					},
				},
			},
		},
	}
	fakeGroup := Group{Name: "TestRule_Exec"}
	for _, tc := range testCases {
		t.Run(tc.rule.Name, func(t *testing.T) {
			fq := &fakeQuerier{}
			tc.rule.GroupID = fakeGroup.ID()
			tc.rule.q = fq
			tc.rule.state = newRuleState(10)
			fq.add(tc.metrics...)
			if _, err := tc.rule.Exec(context.TODO(), time.Now(), 0); err != nil {
				t.Fatalf("unexpected err: %s", err)
			}
			for hash, expAlert := range tc.expAlerts {
				gotAlert := tc.rule.alerts[hash]
				if gotAlert == nil {
					t.Fatalf("alert %d is missing; labels: %v; annotations: %v",
						hash, expAlert.Labels, expAlert.Annotations)
				}
				if !reflect.DeepEqual(expAlert.Annotations, gotAlert.Annotations) {
					t.Fatalf("expected to have annotations %#v; got %#v", expAlert.Annotations, gotAlert.Annotations)
				}
				if !reflect.DeepEqual(expAlert.Labels, gotAlert.Labels) {
					t.Fatalf("expected to have labels %#v; got %#v", expAlert.Labels, gotAlert.Labels)
				}
			}
		})
	}
}
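
// TestAlertsToSend verifies which alerts are picked for sending to notifiers
// depending on their state, resolve duration and resend delay.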
func TestAlertsToSend(t *testing.T) {
	ts := time.Now()
	f := func(alerts, expAlerts []*notifier.Alert, resolveDuration, resendDelay time.Duration) {
		t.Helper()
		ar := &AlertingRule{alerts: make(map[uint64]*notifier.Alert)}
		for i, a := range alerts {
			ar.alerts[uint64(i)] = a
		}
		gotAlerts := ar.alertsToSend(ts, resolveDuration, resendDelay)
		if gotAlerts == nil && expAlerts == nil {
			return
		}
		if len(gotAlerts) != len(expAlerts) {
			t.Fatalf("expected to get %d alerts; got %d instead",
				len(expAlerts), len(gotAlerts))
		}
		sort.Slice(expAlerts, func(i, j int) bool {
			return expAlerts[i].Name < expAlerts[j].Name
		})
		sort.Slice(gotAlerts, func(i, j int) bool {
			return gotAlerts[i].Name < gotAlerts[j].Name
		})
		for i, exp := range expAlerts {
			got := gotAlerts[i]
			if got.LastSent != exp.LastSent {
				t.Fatalf("expected LastSent to be %v; got %v", exp.LastSent, got.LastSent)
			}
			if got.End != exp.End {
				t.Fatalf("expected End to be %v; got %v", exp.End, got.End)
			}
		}
	}

	f( // send firing alert with custom resolve time
		[]*notifier.Alert{{State: notifier.StateFiring}},
		[]*notifier.Alert{{LastSent: ts, End: ts.Add(5 * time.Minute)}},
		5*time.Minute, time.Minute,
	)
	f( // resolve inactive alert at the current timestamp
		[]*notifier.Alert{{State: notifier.StateInactive, ResolvedAt: ts}},
		[]*notifier.Alert{{LastSent: ts, End: ts}},
		time.Minute, time.Minute,
	)
	f( // mixed case of firing and resolved alerts. Names are added for deterministic sorting
		[]*notifier.Alert{{Name: "a", State: notifier.StateFiring}, {Name: "b", State: notifier.StateInactive, ResolvedAt: ts}},
		[]*notifier.Alert{{Name: "a", LastSent: ts, End: ts.Add(5 * time.Minute)}, {Name: "b", LastSent: ts, End: ts}},
		5*time.Minute, time.Minute,
	)
	f( // mixed case of pending and resolved alerts. Names are added for deterministic sorting
		[]*notifier.Alert{{Name: "a", State: notifier.StatePending}, {Name: "b", State: notifier.StateInactive, ResolvedAt: ts}},
		[]*notifier.Alert{{Name: "b", LastSent: ts, End: ts}},
		5*time.Minute, time.Minute,
	)
	f( // attempt to send alert that was already sent in the resendDelay interval
		[]*notifier.Alert{{State: notifier.StateFiring, LastSent: ts.Add(-time.Second)}},
		nil,
		time.Minute, time.Minute,
	)
	f( // attempt to send alert that was sent out of the resendDelay interval
		[]*notifier.Alert{{State: notifier.StateFiring, LastSent: ts.Add(-2 * time.Minute)}},
		[]*notifier.Alert{{LastSent: ts, End: ts.Add(time.Minute)}},
		time.Minute, time.Minute,
	)
	f( // alert must be sent even if resendDelay interval is 0
		[]*notifier.Alert{{State: notifier.StateFiring, LastSent: ts.Add(-time.Second)}},
		[]*notifier.Alert{{LastSent: ts, End: ts.Add(time.Minute)}},
		time.Minute, 0,
	)
	f( // inactive alert which has been sent already
		[]*notifier.Alert{{State: notifier.StateInactive, LastSent: ts.Add(-time.Second), ResolvedAt: ts.Add(-2 * time.Second)}},
		nil,
		time.Minute, time.Minute,
	)
	f( // inactive alert which has been resolved after last send
		[]*notifier.Alert{{State: notifier.StateInactive, LastSent: ts.Add(-time.Second), ResolvedAt: ts}},
		[]*notifier.Alert{{LastSent: ts, End: ts}},
		time.Minute, time.Minute,
	)
}
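
// newTestRuleWithLabels returns a test alerting rule with the given
// key-value pairs attached as extra labels.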
func newTestRuleWithLabels(name string, labels ...string) *AlertingRule {
	r := newTestAlertingRule(name, 0)
	r.Labels = make(map[string]string)
	for i := 0; i < len(labels); i += 2 {
		r.Labels[labels[i]] = labels[i+1]
	}
	return r
}
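
// newTestAlertingRule returns a minimal alerting rule with the given name
// and `for` duration, suitable for use in tests.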
func newTestAlertingRule(name string, waitFor time.Duration) *AlertingRule {
	return &AlertingRule{
		Name:         name,
		For:          waitFor,
		EvalInterval: waitFor,
		alerts:       make(map[uint64]*notifier.Alert),
		state:        newRuleState(10),
	}
}