Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

This commit is contained in:
Aliaksandr Valialkin 2020-11-10 00:28:50 +02:00
commit c625dc5b96
12 changed files with 95 additions and 55 deletions

View file

@ -140,7 +140,17 @@ func (ar *AlertingRule) Exec(ctx context.Context, q datasource.Querier, series b
updated := make(map[uint64]struct{})
// update list of active alerts
for _, m := range qMetrics {
for k, v := range ar.Labels {
// apply extra labels
m.SetLabel(k, v)
}
h := hash(m)
if _, ok := updated[h]; ok {
// duplicate may be caused by extra labels
// conflicting with the metric labels
return nil, fmt.Errorf("labels %v: %w", m.Labels, errDuplicate)
}
updated[h] = struct{}{}
if a, ok := ar.alerts[h]; ok {
if a.Value != m.Value {
@ -258,25 +268,11 @@ func (ar *AlertingRule) newAlert(m datasource.Metric, start time.Time) (*notifie
}
func (ar *AlertingRule) template(a *notifier.Alert) error {
// 1. template rule labels with data labels
rLabels, err := a.ExecTemplate(ar.Labels)
if err != nil {
return err
}
// 2. merge data labels and rule labels
// metric labels may be overridden by
// rule labels
for k, v := range rLabels {
a.Labels[k] = v
}
// 3. template merged labels
var err error
a.Labels, err = a.ExecTemplate(a.Labels)
if err != nil {
return err
}
a.Annotations, err = a.ExecTemplate(ar.Annotations)
return err
}
@ -419,14 +415,7 @@ func (ar *AlertingRule) Restore(ctx context.Context, q datasource.Querier, lookb
// drop all extra labels, so hash key will
// be identical to time series received in Exec
for _, l := range labels {
if l.Name == alertNameLabel {
continue
}
if l.Name == alertGroupNameLabel {
continue
}
// drop all overridden labels
if _, ok := ar.Labels[l.Name]; ok {
if l.Name == alertNameLabel || l.Name == alertGroupNameLabel {
continue
}
m.Labels = append(m.Labels, l)

View file

@ -2,6 +2,8 @@ package main
import (
"context"
"errors"
"strings"
"testing"
"time"
@ -218,19 +220,6 @@ func TestAlertingRule_Exec(t *testing.T) {
hash(metricWithLabels(t, "name", "foo2")): {State: notifier.StateFiring},
},
},
{
newTestAlertingRule("duplicate", 0),
[][]datasource.Metric{
{
// metrics with the same labelset should result in one alert
metricWithLabels(t, "name", "foo", "type", "bar"),
metricWithLabels(t, "type", "bar", "name", "foo"),
},
},
map[uint64]*notifier.Alert{
hash(metricWithLabels(t, "name", "foo", "type", "bar")): {State: notifier.StateFiring},
},
},
{
newTestAlertingRule("for-pending", time.Minute),
[][]datasource.Metric{
@ -376,7 +365,7 @@ func TestAlertingRule_Restore(t *testing.T) {
alertNameLabel, "",
"foo", "bar",
"namespace", "baz",
// following pair supposed to be dropped
// extra labels set by rule
"source", "vm",
),
},
@ -384,6 +373,7 @@ func TestAlertingRule_Restore(t *testing.T) {
hash(metricWithLabels(t,
"foo", "bar",
"namespace", "baz",
"source", "vm",
)): {State: notifier.StatePending,
Start: time.Now().Truncate(time.Hour)},
},
@ -442,6 +432,38 @@ func TestAlertingRule_Restore(t *testing.T) {
}
}
// TestAlertingRule_Exec_Negative verifies the error paths of AlertingRule.Exec:
// duplicate time series produced by rule extra labels, and datasource errors
// propagated from the querier.
func TestAlertingRule_Exec_Negative(t *testing.T) {
	fq := &fakeQuerier{}
	ar := newTestAlertingRule("test", 0)
	ar.Labels = map[string]string{"job": "test"}

	// successful attempt: the extra `job` label simply overrides the metric label
	fq.add(metricWithValueAndLabels(t, 1, "__name__", "foo", "job", "bar"))
	_, err := ar.Exec(context.TODO(), fq, false)
	if err != nil {
		t.Fatal(err)
	}

	// label `job` will collide with rule extra label and will make both time series equal
	fq.add(metricWithValueAndLabels(t, 1, "__name__", "foo", "job", "baz"))
	_, err = ar.Exec(context.TODO(), fq, false)
	if !errors.Is(err, errDuplicate) {
		t.Fatalf("expected to have %s error; got %s", errDuplicate, err)
	}

	fq.reset()

	// datasource errors must be returned to the caller as-is (wrapped)
	expErr := "connection reset by peer"
	fq.setErr(errors.New(expErr))
	_, err = ar.Exec(context.TODO(), fq, false)
	if err == nil {
		t.Fatalf("expected to get err; got nil")
	}
	// NOTE: fixed typo "insterad" -> "instead" in the failure message
	if !strings.Contains(err.Error(), expErr) {
		t.Fatalf("expected to get err %q; got %q instead", expErr, err)
	}
}
func newTestRuleWithLabels(name string, labels ...string) *AlertingRule {
r := newTestAlertingRule(name, 0)
r.Labels = make(map[string]string)

View file

@ -17,6 +17,23 @@ type Metric struct {
Value float64
}
// SetLabel updates the value of the label with the given name,
// or appends it as a new label when no such label exists yet.
func (m *Metric) SetLabel(key, value string) {
	for i := range m.Labels {
		if m.Labels[i].Name != key {
			continue
		}
		m.Labels[i].Value = value
		return
	}
	m.AddLabel(key, value)
}
// AddLabel appends a label with the given name and value to the
// label set; it performs no duplicate check.
func (m *Metric) AddLabel(key, value string) {
	l := Label{Name: key, Value: value}
	m.Labels = append(m.Labels, l)
}
// Label represents metric's label
type Label struct {
Name string

View file

@ -37,7 +37,7 @@ func (r response) metrics() ([]Metric, error) {
}
m.Labels = nil
for k, v := range r.Data.Result[i].Labels {
m.Labels = append(m.Labels, Label{Name: k, Value: v})
m.AddLabel(k, v)
}
m.Timestamp = int64(res.TV[0].(float64))
m.Value = f

View file

@ -172,6 +172,11 @@ func TestGroupStart(t *testing.T) {
t.Fatalf("failed to create alert: %s", err)
}
alert1.State = notifier.StateFiring
// add external label
alert1.Labels["cluster"] = "east-1"
// add rule labels - see config/testdata/rules1-good.rules
alert1.Labels["label"] = "bar"
alert1.Labels["host"] = inst1
alert1.ID = hash(m1)
alert2, err := r.newAlert(m2, time.Now())
@ -179,6 +184,11 @@ func TestGroupStart(t *testing.T) {
t.Fatalf("failed to create alert: %s", err)
}
alert2.State = notifier.StateFiring
// add external label
alert2.Labels["cluster"] = "east-1"
// add rule labels - see config/testdata/rules1-good.rules
alert2.Labels["label"] = "bar"
alert2.Labels["host"] = inst2
alert2.ID = hash(m2)
finished := make(chan struct{})

View file

@ -2,7 +2,6 @@ package main
import (
"context"
"errors"
"fmt"
"hash/fnv"
"sort"
@ -79,8 +78,6 @@ func (rr *RecordingRule) Close() {
metrics.UnregisterMetric(rr.metrics.errors.name)
}
var errDuplicate = errors.New("result contains metrics with the same labelset after applying rule labels")
// Exec executes RecordingRule expression via the given Querier.
func (rr *RecordingRule) Exec(ctx context.Context, q datasource.Querier, series bool) ([]prompbmarshal.TimeSeries, error) {
if !series {

View file

@ -2,6 +2,7 @@ package main
import (
"context"
"errors"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
@ -25,3 +26,5 @@ type Rule interface {
// such as metrics unregister
Close()
}
var errDuplicate = errors.New("result contains metrics with the same labelset after applying rule labels")

View file

@ -1,11 +1,11 @@
# OpenBSD ports
Tested with Release 6.7
Tested with Release 6.8
The VictoriaMetrics DB must be placed in the `/usr/ports/sysutils` directory
and the file `/usr/ports/infrastructure/db/user.list`
should be modified with a new line
```
855 _vmetrics _vmetrics sysutils/VictoriaMetrics
862 _vmetrics _vmetrics sysutils/VictoriaMetrics
```

View file

@ -4,7 +4,7 @@ COMMENT = fast, cost-effective and scalable time series database
GH_ACCOUNT = VictoriaMetrics
GH_PROJECT = VictoriaMetrics
GH_TAGNAME = v1.44.0
GH_TAGNAME = v1.46.0
CATEGORIES = sysutils
@ -19,8 +19,10 @@ WANTLIB = c pthread
USE_GMAKE = Yes
MODULES= lang/go
MODGO_GOPATH= ${MODGO_WORKSPACE}
MODULES = lang/go
MODGO_GOPATH = ${MODGO_WORKSPACE}
SUBST_VARS = VARBASE
do-build:
cd ${WRKSRC} && GOOS=openbsd ${MAKE_ENV} ${MAKE_PROGRAM} victoria-metrics-pure

View file

@ -1,2 +1,2 @@
SHA256 (VictoriaMetrics-1.44.0.tar.gz) = OIXIyqiijWvAPDgq5wMoDpv1rENcIOWIcXmz4T5v1lU=
SIZE (VictoriaMetrics-1.44.0.tar.gz) = 8898365
SHA256 (VictoriaMetrics-1.46.0.tar.gz) = s9wWGc/VTF9LrK7Ingg1qEhD0lN198hfUqXOut6CH3Y=
SIZE (VictoriaMetrics-1.46.0.tar.gz) = 9101866

View file

@ -1,3 +1,2 @@
VictoriaMetrics is fast,
cost-effective and scalable time-series database.
VictoriaMetrics is fast, cost-effective and scalable time-series
database.

View file

@ -1,13 +1,14 @@
@comment $OpenBSD$
@newgroup _vmetrics:855
@newuser _vmetrics:855:_vmetrics:daemon:VictoriaMetrics:${VARBASE}/db/vmetrics:/sbin/nologin
@comment $OpenBSD: PLIST,v$
@newgroup _vmetrics:862
@newuser _vmetrics:862:_vmetrics:daemon:VictoriaMetrics:${VARBASE}/db/vmetrics:/sbin/nologin
@sample ${SYSCONFDIR}/prometheus/
@rcscript ${RCDIR}/vmetrics
@bin bin/vmetricslogger.pl
@bin bin/vmetrics
@bin bin/vmetricsbackup
@bin bin/vmetricslogger.pl
share/doc/vmetrics/
share/doc/vmetrics/Articles.md
share/doc/vmetrics/CHANGELOG.md
share/doc/vmetrics/CaseStudies.md
share/doc/vmetrics/Cluster-VictoriaMetrics.md
share/doc/vmetrics/ExtendedPromQL.md