package rule

import (
	"context"
	"fmt"
	"hash/fnv"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/templates"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
)

// AlertingRule is a basic alert entity
type AlertingRule struct {
	Type          config.Type
	RuleID        uint64
	Name          string
	Expr          string
	For           time.Duration
	KeepFiringFor time.Duration
	Labels        map[string]string
	Annotations   map[string]string
	GroupID       uint64
	GroupName     string
	File          string
	EvalInterval  time.Duration
	Debug         bool

	q datasource.Querier

	alertsMu sync.RWMutex
	// stores the list of active alerts
	alerts map[uint64]*notifier.Alert

	// state stores recent state changes
	// during evaluations
	state *ruleState

	metrics *alertingRuleMetrics
}

type alertingRuleMetrics struct {
	errors        *utils.Counter
	pending       *utils.Gauge
	active        *utils.Gauge
	samples       *utils.Gauge
	seriesFetched *utils.Gauge
}
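
// Note on concurrency (informational, derived from the code below): alertsMu guards the
// alerts map in exec and in the metrics callbacks registered by NewAlertingRule;
// execRange intentionally skips the lock and is documented as not thread-safe.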

// NewAlertingRule creates a new AlertingRule
func NewAlertingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule) *AlertingRule {
	ar := &AlertingRule{
		Type:          group.Type,
		RuleID:        cfg.ID,
		Name:          cfg.Alert,
		Expr:          cfg.Expr,
		For:           cfg.For.Duration(),
		KeepFiringFor: cfg.KeepFiringFor.Duration(),
		Labels:        cfg.Labels,
		Annotations:   cfg.Annotations,
		GroupID:       group.ID(),
		GroupName:     group.Name,
		File:          group.File,
		EvalInterval:  group.Interval,
		Debug:         cfg.Debug,
		q: qb.BuildWithParams(datasource.QuerierParams{
			DataSourceType:            group.Type.String(),
			ApplyIntervalAsTimeFilter: setIntervalAsTimeFilter(group.Type.String(), cfg.Expr),
			EvaluationInterval:        group.Interval,
			QueryParams:               group.Params,
			Headers:                   group.Headers,
			Debug:                     cfg.Debug,
		}),
		alerts:  make(map[uint64]*notifier.Alert),
		metrics: &alertingRuleMetrics{},
	}

	entrySize := *ruleUpdateEntriesLimit
	if cfg.UpdateEntriesLimit != nil {
		entrySize = *cfg.UpdateEntriesLimit
	}
	if entrySize < 1 {
		entrySize = 1
	}
	ar.state = &ruleState{
		entries: make([]StateEntry, entrySize),
	}

	labels := fmt.Sprintf(`alertname=%q, group=%q, file=%q, id="%d"`, ar.Name, group.Name, group.File, ar.ID())
	ar.metrics.pending = utils.GetOrCreateGauge(fmt.Sprintf(`vmalert_alerts_pending{%s}`, labels),
		func() float64 {
			ar.alertsMu.RLock()
			defer ar.alertsMu.RUnlock()
			var num int
			for _, a := range ar.alerts {
				if a.State == notifier.StatePending {
					num++
				}
			}
			return float64(num)
		})
	ar.metrics.active = utils.GetOrCreateGauge(fmt.Sprintf(`vmalert_alerts_firing{%s}`, labels),
		func() float64 {
			ar.alertsMu.RLock()
			defer ar.alertsMu.RUnlock()
			var num int
			for _, a := range ar.alerts {
				if a.State == notifier.StateFiring {
					num++
				}
			}
			return float64(num)
		})
	ar.metrics.errors = utils.GetOrCreateCounter(fmt.Sprintf(`vmalert_alerting_rules_errors_total{%s}`, labels))
	ar.metrics.samples = utils.GetOrCreateGauge(fmt.Sprintf(`vmalert_alerting_rules_last_evaluation_samples{%s}`, labels),
		func() float64 {
			e := ar.state.getLast()
			return float64(e.Samples)
		})
	ar.metrics.seriesFetched = utils.GetOrCreateGauge(fmt.Sprintf(`vmalert_alerting_rules_last_evaluation_series_fetched{%s}`, labels),
		func() float64 {
			e := ar.state.getLast()
			if e.SeriesFetched == nil {
				// means seriesFetched is unsupported
				return -1
			}
			seriesFetched := float64(*e.SeriesFetched)
			if seriesFetched == 0 && e.Samples > 0 {
				// `alert: 0.95` will fetch no series
				// but will get one time series in response.
				seriesFetched = float64(e.Samples)
			}
			return seriesFetched
		})
	return ar
}
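
// Illustrative usage sketch (hypothetical values, not part of the package API): callers
// construct rules from a parsed group, roughly like
//
//	cfg := config.Rule{Alert: "HighErrorRate", Expr: `rate(errors_total[5m]) > 0.1`}
//	ar := NewAlertingRule(qb, group, cfg) // qb and group are provided by the caller
//
// where qb is a datasource.QuerierBuilder and group is the parent *Group.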

// close unregisters rule metrics
func (ar *AlertingRule) close() {
	ar.metrics.active.Unregister()
	ar.metrics.pending.Unregister()
	ar.metrics.errors.Unregister()
	ar.metrics.samples.Unregister()
	ar.metrics.seriesFetched.Unregister()
}

// String implements Stringer interface
func (ar *AlertingRule) String() string {
	return ar.Name
}

// ID returns unique Rule ID
// within the parent Group.
func (ar *AlertingRule) ID() uint64 {
	return ar.RuleID
}

// GetAlerts returns active alerts of the rule
func (ar *AlertingRule) GetAlerts() []*notifier.Alert {
	ar.alertsMu.RLock()
	defer ar.alertsMu.RUnlock()
	var alerts []*notifier.Alert
	for _, a := range ar.alerts {
		alerts = append(alerts, a)
	}
	return alerts
}

// GetAlert returns the alert if the given id exists
func (ar *AlertingRule) GetAlert(id uint64) *notifier.Alert {
	ar.alertsMu.RLock()
	defer ar.alertsMu.RUnlock()
	if ar.alerts == nil {
		return nil
	}
	return ar.alerts[id]
}

func (ar *AlertingRule) logDebugf(at time.Time, a *notifier.Alert, format string, args ...any) {
	if !ar.Debug {
		return
	}
	prefix := fmt.Sprintf("DEBUG rule %q:%q (%d) at %v: ",
		ar.GroupName, ar.Name, ar.RuleID, at.Format(time.RFC3339))

	if a != nil {
		labelKeys := make([]string, len(a.Labels))
		var i int
		for k := range a.Labels {
			labelKeys[i] = k
			i++
		}
		sort.Strings(labelKeys)
		labels := make([]string, len(labelKeys))
		for i, l := range labelKeys {
			labels[i] = fmt.Sprintf("%s=%q", l, a.Labels[l])
		}
		labelsStr := strings.Join(labels, ",")
		prefix += fmt.Sprintf("alert %d {%s} ", a.ID, labelsStr)
	}
	msg := fmt.Sprintf(format, args...)
	logger.Infof("%s", prefix+msg)
}
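
// Example output (illustrative values only):
//
//	DEBUG rule "my-group":"HighErrorRate" (123) at 2024-01-01T00:00:00Z: alert 456 {instance="host:9090",job="app"} created in state PENDING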

// updateWith copies all significant fields.
// alerts state isn't copied since
// it should be updated in the next 2 Execs
func (ar *AlertingRule) updateWith(r Rule) error {
	nr, ok := r.(*AlertingRule)
	if !ok {
		return fmt.Errorf("BUG: attempt to update alerting rule with wrong type %#v", r)
	}
	ar.Expr = nr.Expr
	ar.For = nr.For
	ar.KeepFiringFor = nr.KeepFiringFor
	ar.Labels = nr.Labels
	ar.Annotations = nr.Annotations
	ar.EvalInterval = nr.EvalInterval
	ar.Debug = nr.Debug
	ar.q = nr.q
	ar.state = nr.state
	return nil
}

type labelSet struct {
	// origin contains labels extracted from the received time series
	// plus extra labels (group labels, service labels like alertNameLabel).
	// In case of conflicts, origin labels from the time series are preferred.
	// Used for templating annotations.
	origin map[string]string
	// processed includes origin labels
	// plus extra labels (group labels, service labels like alertNameLabel).
	// In case of key conflicts, origin labels are renamed with prefix `exported_` and extra labels are preferred.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5161
	// Used as labels attached to notifier.Alert and ALERTS series written to remote storage.
	processed map[string]string
}

// add adds a value v with key k to origin and processed label sets.
// On k conflicts in processed set, the passed v is preferred.
// On k conflicts in origin set, the original value is preferred and copied
// to processed with `exported_%k` key. The copy happens only if passed v isn't equal to origin[k] value.
func (ls *labelSet) add(k, v string) {
	ls.processed[k] = v
	ov, ok := ls.origin[k]
	if !ok {
		ls.origin[k] = v
		return
	}
	if ov != v {
		// copy value only if v and ov are different
		key := fmt.Sprintf("exported_%s", k)
		ls.processed[key] = ov
	}
}
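
// For illustration (hypothetical values): if the received series already carries
// severity="warning" and the rule config defines the extra label severity="critical",
// then after add("severity", "critical"):
//
//	origin:    severity="warning"
//	processed: severity="critical", exported_severity="warning"
//
// so annotation templates keep seeing the original value while the stored alert carries the rule's value.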

// toLabels converts labels from the given Metric
// to a labelSet which contains original and processed labels.
func (ar *AlertingRule) toLabels(m datasource.Metric, qFn templates.QueryFn) (*labelSet, error) {
	ls := &labelSet{
		origin:    make(map[string]string),
		processed: make(map[string]string),
	}
	for _, l := range m.Labels {
		ls.origin[l.Name] = l.Value
		// drop __name__ to be consistent with Prometheus alerting
		if l.Name == "__name__" {
			continue
		}
		ls.processed[l.Name] = l.Value
	}

	extraLabels, err := notifier.ExecTemplate(qFn, ar.Labels, notifier.AlertTplData{
		Labels: ls.origin,
		Value:  m.Values[0],
		Expr:   ar.Expr,
	})
	if err != nil {
		return nil, fmt.Errorf("failed to expand labels: %w", err)
	}
	for k, v := range extraLabels {
		ls.add(k, v)
	}
	// set additional labels to identify group and rule name
	if ar.Name != "" {
		ls.add(alertNameLabel, ar.Name)
	}
	if !*disableAlertGroupLabel && ar.GroupName != "" {
		ls.add(alertGroupNameLabel, ar.GroupName)
	}
	return ls, nil
}
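
// Note (derived from exec/execRange below): the processed label set returned here is
// what gets hashed into the alert ID, so any templated rule label or group label added
// above also changes alert identity between evaluations.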

// execRange executes the alerting rule on the given time range similarly to exec.
// When making consecutive calls make sure to respect time linearity for start and end params,
// as this function modifies AlertingRule alerts state.
// It is not thread safe.
// It returns ALERTS and ALERTS_FOR_STATE time series as a result.
func (ar *AlertingRule) execRange(ctx context.Context, start, end time.Time) ([]prompbmarshal.TimeSeries, error) {
	res, err := ar.q.QueryRange(ctx, ar.Expr, start, end)
	if err != nil {
		return nil, err
	}
	var result []prompbmarshal.TimeSeries
	holdAlertState := make(map[uint64]*notifier.Alert)
	qFn := func(_ string) ([]datasource.Metric, error) {
		return nil, fmt.Errorf("`query` template isn't supported in replay mode")
	}
	for _, s := range res.Data {
		ls, as, err := ar.expandTemplates(s, qFn, time.Time{})
		if err != nil {
			return nil, fmt.Errorf("failed to expand templates: %s", err)
		}
		alertID := hash(ls.processed)
		a := ar.newAlert(s, time.Time{}, ls.processed, as) // initial alert

		prevT := time.Time{}
		for i := range s.Values {
			at := time.Unix(s.Timestamps[i], 0)
			// try to restore alert's state on the first iteration
			if at.Equal(start) {
				if _, ok := ar.alerts[alertID]; ok {
					a = ar.alerts[alertID]
					prevT = at
				}
			}
			if at.Sub(prevT) > ar.EvalInterval {
				// reset to Pending if there are gaps > EvalInterval between DPs
				a.State = notifier.StatePending
				a.ActiveAt = at
				// re-template the annotations as the active timestamp has changed
				_, a.Annotations, _ = ar.expandTemplates(s, qFn, at)
				a.Start = time.Time{}
			} else if at.Sub(a.ActiveAt) >= ar.For && a.State != notifier.StateFiring {
				a.State = notifier.StateFiring
				a.Start = at
			}
			prevT = at
			if ar.For == 0 {
				// rules with `for: 0` are always firing when they have Value
				a.State = notifier.StateFiring
			}
			result = append(result, ar.alertToTimeSeries(a, s.Timestamps[i])...)

			// save alert's state on the last iteration, so it can be used on the next execRange call
			if at.Equal(end) {
				holdAlertState[alertID] = a
			}
		}
	}
	ar.alerts = holdAlertState
	return result, nil
}
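
// execRange backs vmalert's replay mode (hence the disabled `query` template above).
// A sketch of the expected call pattern, with hypothetical times t0 <= t1 <= t2, is one
// call per window in chronological order so the saved state can be restored:
//
//	tss1, err1 := ar.execRange(ctx, t0, t1)
//	tss2, err2 := ar.execRange(ctx, t1, t2) // start/end never move backwards in time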

// resolvedRetention is the duration for which a resolved alert instance
// is kept in memory state and consequently repeatedly sent to the AlertManager.
const resolvedRetention = 15 * time.Minute

// exec executes AlertingRule expression via the given Querier.
// Based on the Querier results AlertingRule maintains notifier.Alerts
func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]prompbmarshal.TimeSeries, error) {
	start := time.Now()
	res, req, err := ar.q.Query(ctx, ar.Expr, ts)
	curState := StateEntry{
		Time:          start,
		At:            ts,
		Duration:      time.Since(start),
		Samples:       len(res.Data),
		SeriesFetched: res.SeriesFetched,
		Err:           err,
		Curl:          requestToCurl(req),
	}

	defer func() {
		ar.state.add(curState)
		if curState.Err != nil {
			ar.metrics.errors.Inc()
		}
	}()

	if err != nil {
		return nil, fmt.Errorf("failed to execute query %q: %w", ar.Expr, err)
	}
	ar.logDebugf(ts, nil, "query returned %d samples (elapsed: %s)", curState.Samples, curState.Duration)

	qFn := func(query string) ([]datasource.Metric, error) {
		res, _, err := ar.q.Query(ctx, query, ts)
		return res.Data, err
	}

	// template labels and annotations before updating ar.alerts,
	// since they could use `query` function which takes a while to execute,
	// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6079.
	expandedLabels := make([]*labelSet, len(res.Data))
	expandedAnnotations := make([]map[string]string, len(res.Data))
	for i, m := range res.Data {
		ls, as, err := ar.expandTemplates(m, qFn, ts)
		if err != nil {
			curState.Err = fmt.Errorf("failed to expand templates: %w", err)
			return nil, curState.Err
		}
		expandedLabels[i] = ls
		expandedAnnotations[i] = as
	}

	ar.alertsMu.Lock()
	defer ar.alertsMu.Unlock()

	for h, a := range ar.alerts {
		// cleanup inactive alerts from previous Exec
		if a.State == notifier.StateInactive && ts.Sub(a.ResolvedAt) > resolvedRetention {
			ar.logDebugf(ts, a, "deleted as inactive")
			delete(ar.alerts, h)
		}
	}

	updated := make(map[uint64]struct{})
	// update the list of active alerts
	for i, m := range res.Data {
		labels, annotations := expandedLabels[i], expandedAnnotations[i]
		alertID := hash(labels.processed)
		if _, ok := updated[alertID]; ok {
			// duplicate may be caused by the removal of the `__name__` label
			curState.Err = fmt.Errorf("labels %v: %w", labels.processed, errDuplicate)
			return nil, curState.Err
		}
		updated[alertID] = struct{}{}
		if a, ok := ar.alerts[alertID]; ok {
			if a.State == notifier.StateInactive {
				// alert could be in inactive state for resolvedRetention
				// so when we again receive metrics for it - we switch it
				// back to notifier.StatePending
				a.State = notifier.StatePending
				a.ActiveAt = ts
				ar.logDebugf(ts, a, "INACTIVE => PENDING")
			}
			a.Value = m.Values[0]
			a.Annotations = annotations
			a.KeepFiringSince = time.Time{}
			continue
		}
		a := ar.newAlert(m, ts, labels.processed, annotations)
		a.ID = alertID
		a.State = notifier.StatePending
		ar.alerts[alertID] = a
		ar.logDebugf(ts, a, "created in state PENDING")
	}
	var numActivePending int
	var tss []prompbmarshal.TimeSeries
	for h, a := range ar.alerts {
		// if the alert wasn't updated in this iteration,
		// it means it is resolved already
		if _, ok := updated[h]; !ok {
			if a.State == notifier.StatePending {
				// alert was in Pending state - it is not active anymore
				// add stale time series
				tss = append(tss, pendingAlertStaleTimeSeries(a.Labels, ts.Unix(), true)...)

				delete(ar.alerts, h)
				ar.logDebugf(ts, a, "PENDING => DELETED: is absent in current evaluation round")
				continue
			}
			// check if alert should keep StateFiring if rule has
			// `keep_firing_for` field
			if a.State == notifier.StateFiring {
				if ar.KeepFiringFor > 0 {
					if a.KeepFiringSince.IsZero() {
						a.KeepFiringSince = ts
					}
				}
				// alerts with ar.KeepFiringFor>0 may remain FIRING
				// even if their expression isn't true anymore
				if ts.Sub(a.KeepFiringSince) >= ar.KeepFiringFor {
					a.State = notifier.StateInactive
					a.ResolvedAt = ts
					// add stale time series
					tss = append(tss, firingAlertStaleTimeSeries(a.Labels, ts.Unix())...)

					ar.logDebugf(ts, a, "FIRING => INACTIVE: is absent in current evaluation round")
					continue
				}
				ar.logDebugf(ts, a, "KEEP_FIRING: will keep firing for %fs since %v", ar.KeepFiringFor.Seconds(), a.KeepFiringSince)
			}
		}
		numActivePending++
		if a.State == notifier.StatePending && ts.Sub(a.ActiveAt) >= ar.For {
			a.State = notifier.StateFiring
			a.Start = ts
			alertsFired.Inc()
			if ar.For > 0 {
				// add stale time series
				tss = append(tss, pendingAlertStaleTimeSeries(a.Labels, ts.Unix(), false)...)
			}
			ar.logDebugf(ts, a, "PENDING => FIRING: %s since becoming active at %v", ts.Sub(a.ActiveAt), a.ActiveAt)
		}
	}
	if limit > 0 && numActivePending > limit {
		ar.alerts = map[uint64]*notifier.Alert{}
		curState.Err = fmt.Errorf("exec exceeded limit of %d with %d alerts", limit, numActivePending)
		return nil, curState.Err
	}
	return append(tss, ar.toTimeSeries(ts.Unix())...), nil
}
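
// Summary of the state transitions implemented above (informational): a matching series
// creates a Pending alert; Pending becomes Firing once it has been active for at least
// `for`; a Firing alert whose series disappears becomes Inactive (optionally delayed by
// `keep_firing_for`) and is dropped after resolvedRetention; stale time series are
// emitted for Pending/Firing series that stop being produced.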

func (ar *AlertingRule) expandTemplates(m datasource.Metric, qFn templates.QueryFn, ts time.Time) (*labelSet, map[string]string, error) {
	ls, err := ar.toLabels(m, qFn)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to expand labels: %w", err)
	}

	tplData := notifier.AlertTplData{
		Value:    m.Values[0],
		Labels:   ls.origin,
		Expr:     ar.Expr,
		AlertID:  hash(ls.processed),
		GroupID:  ar.GroupID,
		ActiveAt: ts,
		For:      ar.For,
	}
	as, err := notifier.ExecTemplate(qFn, ar.Annotations, tplData)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to template annotations: %w", err)
	}
	return ls, as, nil
}
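
// Note (informational): annotation templates receive the *origin* labels via
// AlertTplData.Labels, which mirrors Prometheus behaviour - the stored alert itself
// carries the processed labels instead (see labelSet above).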

// toTimeSeries creates `ALERTS` and `ALERTS_FOR_STATE` for active alerts
func (ar *AlertingRule) toTimeSeries(timestamp int64) []prompbmarshal.TimeSeries {
	var tss []prompbmarshal.TimeSeries
	for _, a := range ar.alerts {
		if a.State == notifier.StateInactive {
			continue
		}
		ts := ar.alertToTimeSeries(a, timestamp)
		tss = append(tss, ts...)
	}
	return tss
}

// TODO: consider hashing algorithm in VM
func hash(labels map[string]string) uint64 {
	hash := fnv.New64a()
	keys := make([]string, 0, len(labels))
	for k := range labels {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		// drop __name__ to be consistent with Prometheus alerting
		if k == "__name__" {
			continue
		}
		name, value := k, labels[k]
		hash.Write([]byte(name))
		hash.Write([]byte(value))
		hash.Write([]byte("\xff"))
	}
	return hash.Sum64()
}
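
// Illustrative properties of hash (derived from the code above): keys are sorted first,
// so {job="a", instance="b"} and {instance="b", job="a"} yield the same alert ID; a 0xff
// byte separates consecutive label pairs; and __name__ is skipped, so the metric name
// never influences alert identity.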

func (ar *AlertingRule) newAlert(m datasource.Metric, start time.Time, labels, annotations map[string]string) *notifier.Alert {
	as := make(map[string]string)
	if annotations != nil {
		as = annotations
	}
	ls := make(map[string]string)
	if labels != nil {
		ls = labels
	}
	return &notifier.Alert{
		GroupID:     ar.GroupID,
		Name:        ar.Name,
		Expr:        ar.Expr,
		For:         ar.For,
		ActiveAt:    start,
		Value:       m.Values[0],
		Labels:      ls,
		Annotations: as,
	}
}

const (
	// alertMetricName is the metric name for time series reflecting the alert state.
	alertMetricName = "ALERTS"
	// alertForStateMetricName is the metric name for time series reflecting the moment of time when alert became active.
	alertForStateMetricName = "ALERTS_FOR_STATE"

	// alertNameLabel is the label name indicating the name of an alert.
	alertNameLabel = "alertname"
	// alertStateLabel is the label name indicating the state of an alert.
	alertStateLabel = "alertstate"

	// alertGroupNameLabel defines the label name attached to generated time series.
	// Attaching this label may be disabled via the `-disableAlertgroupLabel` flag.
	alertGroupNameLabel = "alertgroup"
)
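
// For illustration, a firing alert is exported roughly as the following pair of
// series (label values here are hypothetical):
//
//	ALERTS{alertname="HighCPU", alertgroup="host-rules", alertstate="firing", instance="app-1"} 1
//	ALERTS_FOR_STATE{alertname="HighCPU", alertgroup="host-rules", instance="app-1"} 1718000000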

// alertToTimeSeries converts the given alert with the given timestamp to time series
func (ar *AlertingRule) alertToTimeSeries(a *notifier.Alert, timestamp int64) []prompbmarshal.TimeSeries {
	return []prompbmarshal.TimeSeries{
		alertToTimeSeries(a, timestamp),
		alertForToTimeSeries(a, timestamp),
	}
}
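
// alertToTimeSeries returns an `ALERTS` time series for the given alert:
// it carries the alert labels plus an `alertstate` label and always has value 1.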
func alertToTimeSeries(a *notifier.Alert, timestamp int64) prompbmarshal.TimeSeries {
	var labels []prompbmarshal.Label
	for k, v := range a.Labels {
		labels = append(labels, prompbmarshal.Label{
			Name:  k,
			Value: v,
		})
	}
	// __name__ has already been dropped, so there is no need to check for duplicates
	labels = append(labels, prompbmarshal.Label{Name: "__name__", Value: alertMetricName})
	if ol := promrelabel.GetLabelByName(labels, alertStateLabel); ol != nil {
		ol.Value = a.State.String()
	} else {
		labels = append(labels, prompbmarshal.Label{Name: alertStateLabel, Value: a.State.String()})
	}
	return newTimeSeries([]float64{1}, []int64{timestamp}, labels)
}

// alertForToTimeSeries returns an `ALERTS_FOR_STATE` time series that represents
// the state of an active alert, where the value is the time when the alert became active.
// This series is queried back by restore() to recover ActiveAt after a restart.
func alertForToTimeSeries(a *notifier.Alert, timestamp int64) prompbmarshal.TimeSeries {
	var labels []prompbmarshal.Label
	for k, v := range a.Labels {
		labels = append(labels, prompbmarshal.Label{
			Name:  k,
			Value: v,
		})
	}
	// __name__ has already been dropped, so there is no need to check for duplicates
	labels = append(labels, prompbmarshal.Label{Name: "__name__", Value: alertForStateMetricName})
	return newTimeSeries([]float64{float64(a.ActiveAt.Unix())}, []int64{timestamp}, labels)
}
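
// The helpers below emit Prometheus staleness markers (decimal.StaleNaN) for the
// `ALERTS` and `ALERTS_FOR_STATE` series once an alert leaves the corresponding
// state, so the stale series stop showing up in instant queries.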

// pendingAlertStaleTimeSeries returns stale `ALERTS` and `ALERTS_FOR_STATE` time series
// for alerts which changed their state from Pending to Inactive or Firing.
func pendingAlertStaleTimeSeries(ls map[string]string, timestamp int64, includeAlertForState bool) []prompbmarshal.TimeSeries {
	var result []prompbmarshal.TimeSeries
	var baseLabels []prompbmarshal.Label
	for k, v := range ls {
		baseLabels = append(baseLabels, prompbmarshal.Label{
			Name:  k,
			Value: v,
		})
	}
	// __name__ has already been dropped, so there is no need to check for duplicates
	alertsLabels := append(baseLabels, prompbmarshal.Label{Name: "__name__", Value: alertMetricName})
	alertsLabels = append(alertsLabels, prompbmarshal.Label{Name: alertStateLabel, Value: notifier.StatePending.String()})
	result = append(result, newTimeSeries([]float64{decimal.StaleNaN}, []int64{timestamp}, alertsLabels))

	if includeAlertForState {
		alertsForStateLabels := append(baseLabels, prompbmarshal.Label{Name: "__name__", Value: alertForStateMetricName})
		result = append(result, newTimeSeries([]float64{decimal.StaleNaN}, []int64{timestamp}, alertsForStateLabels))
	}
	return result
}

// firingAlertStaleTimeSeries returns stale `ALERTS` and `ALERTS_FOR_STATE` time series
// for alerts which changed their state from Firing to Inactive.
func firingAlertStaleTimeSeries(ls map[string]string, timestamp int64) []prompbmarshal.TimeSeries {
	var baseLabels []prompbmarshal.Label
	for k, v := range ls {
		baseLabels = append(baseLabels, prompbmarshal.Label{
			Name:  k,
			Value: v,
		})
	}
	// __name__ has already been dropped, so there is no need to check for duplicates
	alertsLabels := append(baseLabels, prompbmarshal.Label{Name: "__name__", Value: alertMetricName})
	alertsLabels = append(alertsLabels, prompbmarshal.Label{Name: alertStateLabel, Value: notifier.StateFiring.String()})

	alertsForStateLabels := append(baseLabels, prompbmarshal.Label{Name: "__name__", Value: alertForStateMetricName})

	return []prompbmarshal.TimeSeries{
		newTimeSeries([]float64{decimal.StaleNaN}, []int64{timestamp}, alertsLabels),
		newTimeSeries([]float64{decimal.StaleNaN}, []int64{timestamp}, alertsForStateLabels),
	}
}

// restore restores the value of ActiveAt field for active alerts,
// based on previously written time series `alertForStateMetricName`.
// Only rules with For > 0 can be restored.
func (ar *AlertingRule) restore(ctx context.Context, q datasource.Querier, ts time.Time, lookback time.Duration) error {
	if ar.For < 1 {
		return nil
	}

	if len(ar.alerts) < 1 {
		return nil
	}

	nameStr := fmt.Sprintf("%s=%q", alertNameLabel, ar.Name)
	if !*disableAlertGroupLabel {
		nameStr = fmt.Sprintf("%s=%q,%s=%q", alertGroupNameLabel, ar.GroupName, alertNameLabel, ar.Name)
	}
	var labelsFilter string
	for k, v := range ar.Labels {
		labelsFilter += fmt.Sprintf(",%s=%q", k, v)
	}
	// use `default_rollup()` instead of `last_over_time()` here to account for possible staleness markers
	expr := fmt.Sprintf("default_rollup(%s{%s%s}[%ds])",
		alertForStateMetricName, nameStr, labelsFilter, int(lookback.Seconds()))
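	// The resulting restore query looks like the following (group, rule and label
	// names here are hypothetical; lookback=1h):
	//   default_rollup(ALERTS_FOR_STATE{alertgroup="host-rules",alertname="HighCPU",job="app"}[3600s])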

	res, _, err := q.Query(ctx, expr, ts)
	if err != nil {
		return fmt.Errorf("failed to execute restore query %q: %w ", expr, err)
	}

	if len(res.Data) < 1 {
		ar.logDebugf(ts, nil, "no response was received from restore query")
		return nil
	}

	ar.alertsMu.Lock()
	defer ar.alertsMu.Unlock()

	for _, series := range res.Data {
		series.DelLabel("__name__")
		labelSet := make(map[string]string, len(series.Labels))
		for _, v := range series.Labels {
			labelSet[v.Name] = v.Value
		}
		id := hash(labelSet)
		a, ok := ar.alerts[id]
		if !ok {
			continue
		}
		if a.Restored || a.State != notifier.StatePending {
			continue
		}
		a.ActiveAt = time.Unix(int64(series.Values[0]), 0)
		a.Restored = true
		logger.Infof("alert %q (%d) restored to state at %v", a.Name, a.ID, a.ActiveAt)
	}
	return nil
}

// alertsToSend walks through the current alerts of AlertingRule
// and returns only those which should be sent to notifier.
// Not concurrency-safe.
func (ar *AlertingRule) alertsToSend(resolveDuration, resendDelay time.Duration) []notifier.Alert {
	currentTime := time.Now()
	needsSending := func(a *notifier.Alert) bool {
		if a.State == notifier.StatePending {
			// Pending alerts are never sent to notifiers
			return false
		}
		if a.State == notifier.StateFiring && a.End.Before(a.LastSent) {
			return true
		}
		if a.State == notifier.StateInactive && a.ResolvedAt.After(a.LastSent) {
			// the alert was resolved after the last notification
			return true
		}
		// otherwise, re-send the alert only if resendDelay has passed since the last notification
		return a.LastSent.Add(resendDelay).Before(currentTime)
	}

	var alerts []notifier.Alert
	for _, a := range ar.alerts {
		if !needsSending(a) {
			continue
		}
		a.End = currentTime.Add(resolveDuration)
		if a.State == notifier.StateInactive {
			a.End = a.ResolvedAt
		}
		a.LastSent = currentTime
		alerts = append(alerts, *a)
	}
	return alerts
}