package main

import (
	"context"
	"fmt"
	"hash/fnv"
	"sort"
	"strconv"
	"sync"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/templates"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)

// AlertingRule is basic alert entity
type AlertingRule struct {
	Type         datasource.Type
	RuleID       uint64
	Name         string
	Expr         string
	For          time.Duration
	Labels       map[string]string
	Annotations  map[string]string
	GroupID      uint64
	GroupName    string
	EvalInterval time.Duration

	q datasource.Querier

	// guard status fields
	mu sync.RWMutex
	// stores list of active alerts
	alerts map[uint64]*notifier.Alert
	// stores last moment of time Exec was called
	lastExecTime time.Time
	// stores the duration of the last Exec call
	lastExecDuration time.Duration
	// stores last error that happened in Exec func
	// resets on every successful Exec
	// may be used as Health state
	lastExecError error
	// stores the number of samples returned during
	// the last evaluation
	lastExecSamples int

	metrics *alertingRuleMetrics
}

type alertingRuleMetrics struct {
	errors  *utils.Gauge
	pending *utils.Gauge
	active  *utils.Gauge
	samples *utils.Gauge
}
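// newAlertingRule creates an AlertingRule from the given group and rule config
// and registers its per-rule gauges (pending, firing, errors, last evaluation samples).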
func newAlertingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule) *AlertingRule {
	ar := &AlertingRule{
		Type:         group.Type,
		RuleID:       cfg.ID,
		Name:         cfg.Alert,
		Expr:         cfg.Expr,
		For:          cfg.For.Duration(),
		Labels:       cfg.Labels,
		Annotations:  cfg.Annotations,
		GroupID:      group.ID(),
		GroupName:    group.Name,
		EvalInterval: group.Interval,
		q: qb.BuildWithParams(datasource.QuerierParams{
			DataSourceType:     &group.Type,
			EvaluationInterval: group.Interval,
			QueryParams:        group.Params,
		}),
		alerts:  make(map[uint64]*notifier.Alert),
		metrics: &alertingRuleMetrics{},
	}

	labels := fmt.Sprintf(`alertname=%q, group=%q, id="%d"`, ar.Name, group.Name, ar.ID())
	ar.metrics.pending = utils.GetOrCreateGauge(fmt.Sprintf(`vmalert_alerts_pending{%s}`, labels),
		func() float64 {
			ar.mu.RLock()
			defer ar.mu.RUnlock()
			var num int
			for _, a := range ar.alerts {
				if a.State == notifier.StatePending {
					num++
				}
			}
			return float64(num)
		})
	ar.metrics.active = utils.GetOrCreateGauge(fmt.Sprintf(`vmalert_alerts_firing{%s}`, labels),
		func() float64 {
			ar.mu.RLock()
			defer ar.mu.RUnlock()
			var num int
			for _, a := range ar.alerts {
				if a.State == notifier.StateFiring {
					num++
				}
			}
			return float64(num)
		})
	ar.metrics.errors = utils.GetOrCreateGauge(fmt.Sprintf(`vmalert_alerting_rules_error{%s}`, labels),
		func() float64 {
			ar.mu.RLock()
			defer ar.mu.RUnlock()
			if ar.lastExecError == nil {
				return 0
			}
			return 1
		})
	ar.metrics.samples = utils.GetOrCreateGauge(fmt.Sprintf(`vmalert_alerting_rules_last_evaluation_samples{%s}`, labels),
		func() float64 {
			ar.mu.RLock()
			defer ar.mu.RUnlock()
			return float64(ar.lastExecSamples)
		})
	return ar
}

// Close unregisters rule metrics
func (ar *AlertingRule) Close() {
	ar.metrics.active.Unregister()
	ar.metrics.pending.Unregister()
	ar.metrics.errors.Unregister()
	ar.metrics.samples.Unregister()
}

// String implements Stringer interface
func (ar *AlertingRule) String() string {
	return ar.Name
}

// ID returns unique Rule ID
// within the parent Group.
func (ar *AlertingRule) ID() uint64 {
	return ar.RuleID
}

type labelSet struct {
	// origin labels from series
	// used for templating
	origin map[string]string
	// processed labels with additional data
	// used as Alert labels
	processed map[string]string
}

// toLabels converts labels from given Metric
// to labelSet which contains original and processed labels.
func (ar *AlertingRule) toLabels(m datasource.Metric, qFn templates.QueryFn) (*labelSet, error) {
	ls := &labelSet{
		origin:    make(map[string]string, len(m.Labels)),
		processed: make(map[string]string),
	}
	for _, l := range m.Labels {
		// drop __name__ to be consistent with Prometheus alerting
		if l.Name == "__name__" {
			continue
		}
		ls.origin[l.Name] = l.Value
		ls.processed[l.Name] = l.Value
	}

	extraLabels, err := notifier.ExecTemplate(qFn, ar.Labels, notifier.AlertTplData{
		Labels: ls.origin,
		Value:  m.Values[0],
		Expr:   ar.Expr,
	})
	if err != nil {
		return nil, fmt.Errorf("failed to expand labels: %s", err)
	}
	for k, v := range extraLabels {
		ls.processed[k] = v
	}

	// set additional labels to identify group and rule name
	if ar.Name != "" {
		ls.processed[alertNameLabel] = ar.Name
	}
	if !*disableAlertGroupLabel && ar.GroupName != "" {
		ls.processed[alertGroupNameLabel] = ar.GroupName
	}
	return ls, nil
}

// ExecRange executes alerting rule on the given time range similarly to Exec.
// It doesn't update internal states of the Rule and meant to be used just
// to get time series for backfilling.
// It returns ALERT and ALERT_FOR_STATE time series as result.
func (ar *AlertingRule) ExecRange(ctx context.Context, start, end time.Time, limit int) ([]prompbmarshal.TimeSeries, error) {
	series, err := ar.q.QueryRange(ctx, ar.Expr, start, end)
	if err != nil {
		return nil, err
	}
	var result []prompbmarshal.TimeSeries
	timestamp2Series := make(map[int64][]prompbmarshal.TimeSeries, 0)
	qFn := func(query string) ([]datasource.Metric, error) {
		return nil, fmt.Errorf("`query` template isn't supported in replay mode")
	}
	for _, s := range series {
		a, err := ar.newAlert(s, nil, time.Time{}, qFn) // initial alert
		if err != nil {
			return nil, fmt.Errorf("failed to create alert: %s", err)
		}
		if ar.For == 0 { // if alert is instant
			a.State = notifier.StateFiring
			for i := range s.Values {
				if limit > 0 {
					timestamp2Series[s.Timestamps[i]] = append(timestamp2Series[s.Timestamps[i]], ar.alertToTimeSeries(a, s.Timestamps[i])...)
				} else {
					result = append(result, ar.alertToTimeSeries(a, s.Timestamps[i])...)
				}
			}
			continue
		}
		// if alert with For > 0
		prevT := time.Time{}
		for i := range s.Values {
			at := time.Unix(s.Timestamps[i], 0)
			if at.Sub(prevT) > ar.EvalInterval {
				// reset to Pending if there are gaps > EvalInterval between DPs
				a.State = notifier.StatePending
				a.ActiveAt = at
			} else if at.Sub(a.ActiveAt) >= ar.For {
				a.State = notifier.StateFiring
				a.Start = at
			}
			prevT = at
			if limit > 0 {
				timestamp2Series[s.Timestamps[i]] = append(timestamp2Series[s.Timestamps[i]], ar.alertToTimeSeries(a, s.Timestamps[i])...)
			} else {
				result = append(result, ar.alertToTimeSeries(a, s.Timestamps[i])...)
			}
		}
	}
	if limit <= 0 {
		return result, nil
	}
	sortedTimestamp := make([]int64, 0)
	for timestamp := range timestamp2Series {
		sortedTimestamp = append(sortedTimestamp, timestamp)
	}
	sort.Slice(sortedTimestamp, func(i, j int) bool { return sortedTimestamp[i] < sortedTimestamp[j] })
	for _, timestamp := range sortedTimestamp {
		if len(timestamp2Series[timestamp]) > limit {
			logger.Errorf("exec exceeded limit of %d with %d alerts", limit, len(timestamp2Series[timestamp]))
			continue
		}
		result = append(result, timestamp2Series[timestamp]...)
	}
	return result, nil
}

// resolvedRetention is the duration for which a resolved alert instance
// is kept in memory state and consequently repeatedly sent to the AlertManager.
const resolvedRetention = 15 * time.Minute

// Exec executes AlertingRule expression via the given Querier.
// Based on the Querier results AlertingRule maintains notifier.Alerts
func (ar *AlertingRule) Exec(ctx context.Context, ts time.Time, limit int) ([]prompbmarshal.TimeSeries, error) {
	start := time.Now()
	qMetrics, err := ar.q.Query(ctx, ar.Expr, ts)
	ar.mu.Lock()
	defer ar.mu.Unlock()

	ar.lastExecTime = start
	ar.lastExecDuration = time.Since(start)
	ar.lastExecError = err
	ar.lastExecSamples = len(qMetrics)
	if err != nil {
		return nil, fmt.Errorf("failed to execute query %q: %w", ar.Expr, err)
	}

	for h, a := range ar.alerts {
		// cleanup inactive alerts from previous Exec
		if a.State == notifier.StateInactive && ts.Sub(a.ResolvedAt) > resolvedRetention {
			delete(ar.alerts, h)
		}
	}

	qFn := func(query string) ([]datasource.Metric, error) { return ar.q.Query(ctx, query, ts) }
	updated := make(map[uint64]struct{})
	// update list of active alerts
	for _, m := range qMetrics {
		ls, err := ar.toLabels(m, qFn)
		if err != nil {
			return nil, fmt.Errorf("failed to expand labels: %s", err)
		}
		h := hash(ls.processed)
		if _, ok := updated[h]; ok {
			// duplicate may be caused by extra labels
			// conflicting with the metric labels
			ar.lastExecError = fmt.Errorf("labels %v: %w", ls.processed, errDuplicate)
			return nil, ar.lastExecError
		}
		updated[h] = struct{}{}
		if a, ok := ar.alerts[h]; ok {
			if a.State == notifier.StateInactive {
				// alert could be in inactive state for resolvedRetention
				// so when we again receive metrics for it - we switch it
				// back to notifier.StatePending
				a.State = notifier.StatePending
				a.ActiveAt = ts
			}
			if a.Value != m.Values[0] {
				// update Value field with latest value
				a.Value = m.Values[0]
				// and re-exec template since Value can be used
				// in annotations
				a.Annotations, err = a.ExecTemplate(qFn, ls.origin, ar.Annotations)
				if err != nil {
					return nil, err
				}
			}
			continue
		}
		a, err := ar.newAlert(m, ls, ar.lastExecTime, qFn)
		if err != nil {
			ar.lastExecError = err
			return nil, fmt.Errorf("failed to create alert: %w", err)
		}
		a.ID = h
		a.State = notifier.StatePending
		a.ActiveAt = ts
		ar.alerts[h] = a
	}
	var numActivePending int
	for h, a := range ar.alerts {
		// if alert wasn't updated in this iteration
		// means it is resolved already
		if _, ok := updated[h]; !ok {
			if a.State == notifier.StatePending {
				// alert was in Pending state - it is not
				// active anymore
				delete(ar.alerts, h)
				continue
			}
			if a.State == notifier.StateFiring {
				a.State = notifier.StateInactive
				a.ResolvedAt = ts
			}
			continue
		}
		numActivePending++
		if a.State == notifier.StatePending && ts.Sub(a.ActiveAt) >= ar.For {
			a.State = notifier.StateFiring
			a.Start = ts
			alertsFired.Inc()
		}
	}
	if limit > 0 && numActivePending > limit {
		ar.alerts = map[uint64]*notifier.Alert{}
		return nil, fmt.Errorf("exec exceeded limit of %d with %d alerts", limit, numActivePending)
	}
	return ar.toTimeSeries(ts.Unix()), nil
}
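// toTimeSeries converts the currently tracked alerts into ALERTS and
// ALERTS_FOR_STATE time series at the given timestamp, skipping inactive alerts.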
func (ar *AlertingRule) toTimeSeries(timestamp int64) []prompbmarshal.TimeSeries {
	var tss []prompbmarshal.TimeSeries
	for _, a := range ar.alerts {
		if a.State == notifier.StateInactive {
			continue
		}
		ts := ar.alertToTimeSeries(a, timestamp)
		tss = append(tss, ts...)
	}
	return tss
}

// UpdateWith copies all significant fields.
// The alerts state isn't copied since
// it should be updated in the next 2 Execs
func (ar *AlertingRule) UpdateWith(r Rule) error {
	nr, ok := r.(*AlertingRule)
	if !ok {
		return fmt.Errorf("BUG: attempt to update alerting rule with wrong type %#v", r)
	}
	ar.Expr = nr.Expr
	ar.For = nr.For
	ar.Labels = nr.Labels
	ar.Annotations = nr.Annotations
	ar.EvalInterval = nr.EvalInterval
	ar.q = nr.q
	return nil
}

// TODO: consider hashing algorithm in VM
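// hash returns a fingerprint of the given label set. Keys are sorted first,
// so the result is stable regardless of map iteration order; __name__ is
// skipped to stay consistent with Prometheus alerting.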
func hash(labels map[string]string) uint64 {
	hash := fnv.New64a()
	keys := make([]string, 0, len(labels))
	for k := range labels {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		// drop __name__ to be consistent with Prometheus alerting
		if k == "__name__" {
			continue
		}
		name, value := k, labels[k]
		hash.Write([]byte(name))
		hash.Write([]byte(value))
		hash.Write([]byte("\xff"))
	}
	return hash.Sum64()
}
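// newAlert creates a notifier.Alert for the given metric. If ls is nil, labels
// are expanded via toLabels first; annotations are templated against the origin labels.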
func (ar *AlertingRule) newAlert(m datasource.Metric, ls *labelSet, start time.Time, qFn templates.QueryFn) (*notifier.Alert, error) {
	var err error
	if ls == nil {
		ls, err = ar.toLabels(m, qFn)
		if err != nil {
			return nil, fmt.Errorf("failed to expand labels: %s", err)
		}
	}
	a := &notifier.Alert{
		GroupID:  ar.GroupID,
		Name:     ar.Name,
		Labels:   ls.processed,
		Value:    m.Values[0],
		ActiveAt: start,
		Expr:     ar.Expr,
	}
	a.Annotations, err = a.ExecTemplate(qFn, ls.origin, ar.Annotations)
	return a, err
}

// AlertAPI generates APIAlert object from alert by its id(hash)
func (ar *AlertingRule) AlertAPI(id uint64) *APIAlert {
	ar.mu.RLock()
	defer ar.mu.RUnlock()
	a, ok := ar.alerts[id]
	if !ok {
		return nil
	}
	return ar.newAlertAPI(*a)
}

// ToAPI returns Rule representation in form
// of APIRule
func (ar *AlertingRule) ToAPI() APIRule {
	r := APIRule{
		Type:           "alerting",
		DatasourceType: ar.Type.String(),
		Name:           ar.Name,
		Query:          ar.Expr,
		Duration:       ar.For.Seconds(),
		Labels:         ar.Labels,
		Annotations:    ar.Annotations,
		LastEvaluation: ar.lastExecTime,
		EvaluationTime: ar.lastExecDuration.Seconds(),
		Health:         "ok",
		State:          "inactive",
		Alerts:         ar.AlertsToAPI(),
		LastSamples:    ar.lastExecSamples,

		// encode as strings to avoid rounding in JSON
		ID:      fmt.Sprintf("%d", ar.ID()),
		GroupID: fmt.Sprintf("%d", ar.GroupID),
	}
	if ar.lastExecError != nil {
		r.LastError = ar.lastExecError.Error()
		r.Health = "err"
	}
	// satisfy APIRule.State logic
	if len(r.Alerts) > 0 {
		r.State = notifier.StatePending.String()
		stateFiring := notifier.StateFiring.String()
		for _, a := range r.Alerts {
			if a.State == stateFiring {
				r.State = stateFiring
				break
			}
		}
	}
	return r
}

// AlertsToAPI generates list of APIAlert objects from existing alerts
func (ar *AlertingRule) AlertsToAPI() []*APIAlert {
	var alerts []*APIAlert
	ar.mu.RLock()
	for _, a := range ar.alerts {
		if a.State == notifier.StateInactive {
			continue
		}
		alerts = append(alerts, ar.newAlertAPI(*a))
	}
	ar.mu.RUnlock()
	return alerts
}
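// newAlertAPI converts the given alert into its APIAlert representation.
// Numeric IDs are encoded as strings to avoid rounding on the JSON side.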
func (ar *AlertingRule) newAlertAPI(a notifier.Alert) *APIAlert {
	aa := &APIAlert{
		// encode as strings to avoid rounding
		ID:      fmt.Sprintf("%d", a.ID),
		GroupID: fmt.Sprintf("%d", a.GroupID),
		RuleID:  fmt.Sprintf("%d", ar.RuleID),

		Name:        a.Name,
		Expression:  ar.Expr,
		Labels:      a.Labels,
		Annotations: a.Annotations,
		State:       a.State.String(),
		ActiveAt:    a.ActiveAt,
		Restored:    a.Restored,
		Value:       strconv.FormatFloat(a.Value, 'f', -1, 32),
	}
	if alertURLGeneratorFn != nil {
		aa.SourceLink = alertURLGeneratorFn(a)
	}
	return aa
}

const (
	// alertMetricName is the metric name for synthetic alert timeseries.
	alertMetricName = "ALERTS"
	// alertForStateMetricName is the metric name for 'for' state of alert.
	alertForStateMetricName = "ALERTS_FOR_STATE"

	// alertNameLabel is the label name indicating the name of an alert.
	alertNameLabel = "alertname"
	// alertStateLabel is the label name indicating the state of an alert.
	alertStateLabel = "alertstate"

	// alertGroupNameLabel defines the label name attached for generated time series.
	// attaching this label may be disabled via `-disableAlertgroupLabel` flag.
	alertGroupNameLabel = "alertgroup"
)

// alertToTimeSeries converts the given alert with the given timestamp to time series
func (ar *AlertingRule) alertToTimeSeries(a *notifier.Alert, timestamp int64) []prompbmarshal.TimeSeries {
	var tss []prompbmarshal.TimeSeries
	tss = append(tss, alertToTimeSeries(a, timestamp))
	if ar.For > 0 {
		tss = append(tss, alertForToTimeSeries(a, timestamp))
	}
	return tss
}

// alertToTimeSeries converts the alert into an ALERTS time series sample.
func alertToTimeSeries(a *notifier.Alert, timestamp int64) prompbmarshal.TimeSeries {
	labels := make(map[string]string)
	for k, v := range a.Labels {
		labels[k] = v
	}
	labels["__name__"] = alertMetricName
	labels[alertStateLabel] = a.State.String()
	return newTimeSeries([]float64{1}, []int64{timestamp}, labels)
}
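
// Note: the sample value of the ALERTS series is always 1; the alert's state is
// carried in the alertstate label, so a state change (e.g. pending -> firing)
// starts a new series instead of changing the sample value.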

// alertForToTimeSeries returns a time series that represents
// the state of active alerts, where the value is the time when the alert became active
func alertForToTimeSeries(a *notifier.Alert, timestamp int64) prompbmarshal.TimeSeries {
	labels := make(map[string]string)
	for k, v := range a.Labels {
		labels[k] = v
	}
	labels["__name__"] = alertForStateMetricName
	return newTimeSeries([]float64{float64(a.ActiveAt.Unix())}, []int64{timestamp}, labels)
}
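
// The sample value here is the alert's ActiveAt time in Unix seconds, which is
// what Restore below reads back to recover how long an alert has already been
// active. Hypothetical example: an alert active since 2022-05-20 10:00:00 UTC is
// written as the value 1653040800.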

// Restore restores the state of active alerts based on previously written time series.
// Only the ActiveAt field is restored: the State field is always set to Pending and,
// like the Value field, is supposed to be updated on the next Exec call.
// Only rules with For > 0 will be restored.
func (ar *AlertingRule) Restore(ctx context.Context, q datasource.Querier, lookback time.Duration, labels map[string]string) error {
	if q == nil {
		return fmt.Errorf("querier is nil")
	}

	ts := time.Now()
	qFn := func(query string) ([]datasource.Metric, error) { return ar.q.Query(ctx, query, ts) }

	// account for external labels in filter
	var labelsFilter string
	for k, v := range labels {
		labelsFilter += fmt.Sprintf(",%s=%q", k, v)
	}

	// Get the last data point in range via MetricsQL `last_over_time`.
	// We don't use plain PromQL since Prometheus doesn't support
	// remote write protocol which is used for state persistence in vmalert.
	expr := fmt.Sprintf("last_over_time(%s{alertname=%q%s}[%ds])",
		alertForStateMetricName, ar.Name, labelsFilter, int(lookback.Seconds()))
	qMetrics, err := q.Query(ctx, expr, ts)
	if err != nil {
		return err
	}

	for _, m := range qMetrics {
		ls := &labelSet{
			origin:    make(map[string]string, len(m.Labels)),
			processed: make(map[string]string, len(m.Labels)),
		}
		for _, l := range m.Labels {
			if l.Name == "__name__" {
				continue
			}
			ls.origin[l.Name] = l.Value
			ls.processed[l.Name] = l.Value
		}
		a, err := ar.newAlert(m, ls, time.Unix(int64(m.Values[0]), 0), qFn)
		if err != nil {
			return fmt.Errorf("failed to create alert: %w", err)
		}
		a.ID = hash(ls.processed)
		a.State = notifier.StatePending
		a.Restored = true
		ar.alerts[a.ID] = a
		logger.Infof("alert %q (%d) restored to state at %v", a.Name, a.ID, a.ActiveAt)
	}
	return nil
}
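
// A sketch of the query built by Restore above, assuming a rule named "HighCPU",
// an external label env="prod" and a 1h lookback (all values are hypothetical):
//
//	last_over_time(ALERTS_FOR_STATE{alertname="HighCPU",env="prod"}[3600s])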

// alertsToSend walks through the current alerts of AlertingRule
// and returns only those which should be sent to the notifier.
// Isn't concurrent safe.
func (ar *AlertingRule) alertsToSend(ts time.Time, resolveDuration, resendDelay time.Duration) []notifier.Alert {
	needsSending := func(a *notifier.Alert) bool {
		if a.State == notifier.StatePending {
			return false
		}
		if a.ResolvedAt.After(a.LastSent) {
			return true
		}
		return a.LastSent.Add(resendDelay).Before(ts)
	}

	var alerts []notifier.Alert
	for _, a := range ar.alerts {
		if !needsSending(a) {
			continue
		}
		a.End = ts.Add(resolveDuration)
		if a.State == notifier.StateInactive {
			a.End = a.ResolvedAt
		}
		a.LastSent = ts
		alerts = append(alerts, *a)
	}
	return alerts
}
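
// Illustration of the needsSending policy above (timings are hypothetical):
// with resendDelay=2m and ts=12:00, a firing alert last sent at 11:59 is skipped,
// while one last sent at 11:57 is re-sent. An alert resolved after its last
// notification is always sent again so the notifier learns about the resolution,
// and its End is set to ResolvedAt instead of ts.Add(resolveDuration).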