package main

import (
	"context"
	"fmt"
	"hash/fnv"
	"sync"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/remotewrite"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/metrics"
)

// Group is an entity for grouping rules
type Group struct {
	mu          sync.RWMutex
	Name        string
	File        string
	Rules       []Rule
	Interval    time.Duration
	Concurrency int

	doneCh     chan struct{}
	finishedCh chan struct{}
	// updateCh accepts new Group objects
	// that are supposed to update the current group
	updateCh chan *Group
}
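
// newGroup creates a Group from the given config, falling back to
// defaultInterval when the config does not set an evaluation interval
// and merging the external labels into every rule's labels.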
func newGroup(cfg config.Group, defaultInterval time.Duration, labels map[string]string) *Group {
	g := &Group{
		Name:        cfg.Name,
		File:        cfg.File,
		Interval:    cfg.Interval,
		Concurrency: cfg.Concurrency,
		doneCh:      make(chan struct{}),
		finishedCh:  make(chan struct{}),
		updateCh:    make(chan *Group),
	}
	if g.Interval == 0 {
		g.Interval = defaultInterval
	}
	if g.Concurrency < 1 {
		g.Concurrency = 1
	}
	rules := make([]Rule, len(cfg.Rules))
	for i, r := range cfg.Rules {
		// override rule labels with external labels
		for k, v := range labels {
			if prevV, ok := r.Labels[k]; ok {
				logger.Infof("label %q=%q for rule %q.%q overwritten with external label %q=%q",
					k, prevV, g.Name, r.Name(), k, v)
			}
			if r.Labels == nil {
				r.Labels = map[string]string{}
			}
			r.Labels[k] = v
		}
		rules[i] = g.newRule(r)
	}
	g.Rules = rules
	return g
}
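
// newRule builds an AlertingRule when the config rule has the `alert`
// field set, and a RecordingRule otherwise.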
func (g *Group) newRule(rule config.Rule) Rule {
	if rule.Alert != "" {
		return newAlertingRule(g.ID(), rule)
	}
	return newRecordingRule(g.ID(), rule)
}

// ID returns the unique group ID that is computed
// from the rules file path and the group name.
func (g *Group) ID() uint64 {
	hash := fnv.New64a()
	hash.Write([]byte(g.File))
	// a separator byte prevents collisions between (File, Name) pairs
	hash.Write([]byte("\xff"))
	hash.Write([]byte(g.Name))
	return hash.Sum64()
}

// Restore restores alert states for the group's rules
func (g *Group) Restore(ctx context.Context, q datasource.Querier, lookback time.Duration, labels map[string]string) error {
	for _, rule := range g.Rules {
		rr, ok := rule.(*AlertingRule)
		if !ok {
			continue
		}
		if rr.For < 1 {
			// rules without a `for` duration fire immediately,
			// so there is no pending state to restore
			continue
		}
		if err := rr.Restore(ctx, q, lookback, labels); err != nil {
			return fmt.Errorf("error while restoring rule %q: %w", rule, err)
		}
	}
	return nil
}

// updateWith updates the existing group with the passed group object.
// This function ignores a change of the group evaluation interval,
// which is supposed to be handled in the group.start function.
// Not thread-safe.
func (g *Group) updateWith(newGroup *Group) error {
	rulesRegistry := make(map[uint64]Rule)
	for _, nr := range newGroup.Rules {
		rulesRegistry[nr.ID()] = nr
	}

	for i, or := range g.Rules {
		nr, ok := rulesRegistry[or.ID()]
		if !ok {
			// old rule is not present in the new list,
			// so we mark it for removal
			g.Rules[i] = nil
			continue
		}
		if err := or.UpdateWith(nr); err != nil {
			return err
		}
		delete(rulesRegistry, nr.ID())
	}

	var newRules []Rule
	for _, r := range g.Rules {
		if r == nil {
			// skip rules marked for removal
			continue
		}
		newRules = append(newRules, r)
	}
	// add the rest of the rules from the registry
	for _, nr := range rulesRegistry {
		newRules = append(newRules, nr)
	}
	g.Concurrency = newGroup.Concurrency
	g.Rules = newRules
	return nil
}
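
// metrics for self-monitoring of rule evaluation and alert delivery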
var (
	iterationTotal    = metrics.NewCounter(`vmalert_iteration_total`)
	iterationDuration = metrics.NewSummary(`vmalert_iteration_duration_seconds`)

	execTotal    = metrics.NewCounter(`vmalert_execution_total`)
	execErrors   = metrics.NewCounter(`vmalert_execution_errors_total`)
	execDuration = metrics.NewSummary(`vmalert_execution_duration_seconds`)

	alertsFired      = metrics.NewCounter(`vmalert_alerts_fired_total`)
	alertsSent       = metrics.NewCounter(`vmalert_alerts_sent_total`)
	alertsSendErrors = metrics.NewCounter(`vmalert_alerts_send_errors_total`)

	remoteWriteErrors = metrics.NewCounter(`vmalert_remotewrite_errors_total`)
)
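
// close stops the group's start loop and waits until it has finished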
func (g *Group) close() {
	if g.doneCh == nil {
		return
	}
	close(g.doneCh)
	<-g.finishedCh
}
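
// start runs the group's evaluation loop: it executes the group's rules
// every Interval, applies config updates received via updateCh, and
// returns when ctx is cancelled or close() is called.
//
// A usage sketch (hypothetical wiring; the real setup lives in the caller,
// and cfg, querier, notifiers and rwClient are placeholder names):
//
//	g := newGroup(cfg, *evaluationInterval, externalLabels)
//	go g.start(ctx, querier, notifiers, rwClient)
//	// ...
//	g.close() // stop the loop and wait for it to finish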
func (g *Group) start(ctx context.Context, querier datasource.Querier, nts []notifier.Notifier, rw *remotewrite.Client) {
	defer func() { close(g.finishedCh) }()
	logger.Infof("group %q started; interval=%v; concurrency=%d", g.Name, g.Interval, g.Concurrency)
	e := &executor{querier, nts, rw}
	t := time.NewTicker(g.Interval)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			logger.Infof("group %q: context cancelled", g.Name)
			return
		case <-g.doneCh:
			logger.Infof("group %q: received stop signal", g.Name)
			return
		case ng := <-g.updateCh:
			g.mu.Lock()
			err := g.updateWith(ng)
			if err != nil {
				logger.Errorf("group %q: failed to update: %s", g.Name, err)
				g.mu.Unlock()
				continue
			}
			// restart the ticker if the evaluation interval has changed
			if g.Interval != ng.Interval {
				g.Interval = ng.Interval
				t.Stop()
				t = time.NewTicker(g.Interval)
			}
			g.mu.Unlock()
			logger.Infof("group %q re-started; interval=%v; concurrency=%d", g.Name, g.Interval, g.Concurrency)
		case <-t.C:
			iterationTotal.Inc()
			iterationStart := time.Now()

			errs := e.execConcurrently(ctx, g.Rules, g.Concurrency, g.Interval)
			for err := range errs {
				if err != nil {
					logger.Errorf("group %q: %s", g.Name, err)
				}
			}

			iterationDuration.UpdateDuration(iterationStart)
		}
	}
}
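
// executor executes rules and dispatches the results:
// resulting series go to the remote-write client (if configured)
// and alert notifications go to the notifiers.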
type executor struct {
	querier   datasource.Querier
	notifiers []notifier.Notifier
	rw        *remotewrite.Client
}
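
// execConcurrently executes the given rules with the given level of
// concurrency and returns a channel of per-rule execution errors.
// The channel is closed once all rules have been executed.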
func (e *executor) execConcurrently(ctx context.Context, rules []Rule, concurrency int, interval time.Duration) chan error {
	res := make(chan error, len(rules))
	var returnSeries bool
	if e.rw != nil {
		returnSeries = true
	}

	if concurrency == 1 {
		// fast path
		for _, rule := range rules {
			res <- e.exec(ctx, rule, returnSeries, interval)
		}
		close(res)
		return res
	}

	// sem is a semaphore that limits the number of in-flight rule executions
	sem := make(chan struct{}, concurrency)
	go func() {
		wg := sync.WaitGroup{}
		for _, rule := range rules {
			sem <- struct{}{}
			wg.Add(1)
			go func(r Rule) {
				res <- e.exec(ctx, r, returnSeries, interval)
				<-sem
				wg.Done()
			}(rule)
		}
		wg.Wait()
		close(res)
	}()
	return res
}
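
// exec executes the given rule, pushes the resulting series to the
// remote-write client if one is configured and, for alerting rules,
// sends firing and just-resolved alerts to the notifiers.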
func (e *executor) exec(ctx context.Context, rule Rule, returnSeries bool, interval time.Duration) error {
	execTotal.Inc()
	execStart := time.Now()
	defer func() {
		execDuration.UpdateDuration(execStart)
	}()

	tss, err := rule.Exec(ctx, e.querier, returnSeries)
	if err != nil {
		execErrors.Inc()
		return fmt.Errorf("rule %q: failed to execute: %w", rule, err)
	}

	if len(tss) > 0 && e.rw != nil {
		for _, ts := range tss {
			if err := e.rw.Push(ts); err != nil {
				remoteWriteErrors.Inc()
				return fmt.Errorf("rule %q: remote write failure: %w", rule, err)
			}
		}
	}

	ar, ok := rule.(*AlertingRule)
	if !ok {
		return nil
	}
	var alerts []notifier.Alert
	for _, a := range ar.alerts {
		switch a.State {
		case notifier.StateFiring:
			// set End to the current time plus 3 evaluation intervals,
			// so the notifier can resolve the alert automatically
			// in case vmalert is unable to send a resolve notification
			a.End = time.Now().Add(3 * interval)
			alerts = append(alerts, *a)
		case notifier.StateInactive:
			// set End to the current time to notify
			// that the alert was just resolved
			a.End = time.Now()
			alerts = append(alerts, *a)
		}
	}
	if len(alerts) < 1 {
		return nil
	}
	alertsSent.Add(len(alerts))
	errGr := new(utils.ErrGroup)
	for _, nt := range e.notifiers {
		if err := nt.Send(ctx, alerts); err != nil {
			alertsSendErrors.Inc()
			errGr.Add(fmt.Errorf("rule %q: failed to send alerts: %w", rule, err))
		}
	}
	return errGr.Err()
}