vmalert: reduce allocations for Prometheus resp parse (#3435)
Method `metrics()` now pre-allocates slices for labels and results from query responses. This reduces the number of allocations on the hot path for instant requests.

Signed-off-by: hagen1778 <roman@victoriametrics.com>
parent 9ac1174493
commit a922308438

4 changed files with 70 additions and 17 deletions
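The saving comes from sizing slices up front instead of growing them with append. The snippet below is a generic illustration of that pattern only (not code from this commit): make with a known length allocates once, while appending to a nil slice re-allocates and copies as capacity is exceeded.

package main

import "fmt"

// preallocated allocates the result slice once, sized to the input.
func preallocated(n int) []int {
	out := make([]int, n)
	for i := 0; i < n; i++ {
		out[i] = i
	}
	return out
}

// grown starts from a nil slice; append re-allocates and copies as it grows.
func grown(n int) []int {
	var out []int
	for i := 0; i < n; i++ {
		out = append(out, i)
	}
	return out
}

func main() {
	fmt.Println(len(preallocated(1000)), len(grown(1000)))
}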
@@ -54,6 +54,19 @@ func (m *Metric) SetLabel(key, value string) {
 	m.AddLabel(key, value)
 }
 
+// SetLabels sets the given map as Metric labels
+func (m *Metric) SetLabels(ls map[string]string) {
+	var i int
+	m.Labels = make([]Label, len(ls))
+	for k, v := range ls {
+		m.Labels[i] = Label{
+			Name:  k,
+			Value: v,
+		}
+		i++
+	}
+}
+
 // AddLabel appends the given label to the label set
 func (m *Metric) AddLabel(key, value string) {
 	m.Labels = append(m.Labels, Label{Name: key, Value: value})
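A quick usage sketch of the new helper (illustrative only; the label map below is made up): SetLabels builds the whole label slice in a single allocation sized to the map, where a loop over AddLabel grows m.Labels one append at a time. Label order follows Go's map iteration order, so it is not deterministic.

// Sketch, assuming the Metric/Label types shown above (package datasource).
func setLabelsSketch() (Metric, Metric) {
	ls := map[string]string{"__name__": "vm_rows", "job": "vmalert"}

	var a Metric
	a.SetLabels(ls) // one allocation: make([]Label, len(ls))

	var b Metric
	for k, v := range ls {
		b.AddLabel(k, v) // previous pattern: repeated appends, growing capacity
	}
	return a, b
}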
@@ -32,19 +32,17 @@ type promInstant struct {
 }
 
 func (r promInstant) metrics() ([]Metric, error) {
-	var result []Metric
+	result := make([]Metric, len(r.Result))
 	for i, res := range r.Result {
 		f, err := strconv.ParseFloat(res.TV[1].(string), 64)
 		if err != nil {
 			return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %w", res, res.TV[1], err)
 		}
 		var m Metric
-		for k, v := range r.Result[i].Labels {
-			m.AddLabel(k, v)
-		}
+		m.SetLabels(res.Labels)
 		m.Timestamps = append(m.Timestamps, int64(res.TV[0].(float64)))
 		m.Values = append(m.Values, f)
-		result = append(result, m)
+		result[i] = m
 	}
 	return result, nil
 }
app/vmalert/datasource/vm_prom_api_test.go (new file, 20 lines)

@@ -0,0 +1,20 @@
+package datasource
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func BenchmarkMetrics(b *testing.B) {
+	payload := []byte(`[{"metric":{"__name__":"vm_rows"},"value":[1583786142,"13763"]},{"metric":{"__name__":"vm_requests", "foo":"bar", "baz": "qux"},"value":[1583786140,"2000"]}]`)
+
+	var pi promInstant
+	if err := json.Unmarshal(payload, &pi.Result); err != nil {
+		b.Fatalf(err.Error())
+	}
+	b.Run("Instant", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			_, _ = pi.metrics()
+		}
+	})
+}
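To see the effect on allocations, the new benchmark can be run with the standard Go tooling; the exact invocation below is a suggestion, not part of the commit:

go test -run='^$' -bench=BenchmarkMetrics -benchmem ./app/vmalert/datasource/

-benchmem adds allocs/op and B/op columns to the output, which are the numbers this change targets, and -run='^$' skips the regular tests so only the benchmark executes.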
@@ -7,6 +7,7 @@ import (
 	"net/http/httptest"
 	"net/url"
 	"reflect"
+	"sort"
 	"strconv"
 	"strings"
 	"testing"
@@ -74,7 +75,7 @@ func TestVMInstantQuery(t *testing.T) {
 		case 5:
 			w.Write([]byte(`{"status":"success","data":{"resultType":"matrix"}}`))
 		case 6:
-			w.Write([]byte(`{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"vm_rows"},"value":[1583786142,"13763"]},{"metric":{"__name__":"vm_requests"},"value":[1583786140,"2000"]}]}}`))
+			w.Write([]byte(`{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"vm_rows","foo":"bar"},"value":[1583786142,"13763"]},{"metric":{"__name__":"vm_requests","foo":"baz"},"value":[1583786140,"2000"]}]}}`))
 		case 7:
 			w.Write([]byte(`{"status":"success","data":{"resultType":"scalar","result":[1583786142, "1"]}}`))
 		}
@@ -115,19 +116,17 @@ func TestVMInstantQuery(t *testing.T) {
 	}
 	expected := []Metric{
 		{
-			Labels:     []Label{{Value: "vm_rows", Name: "__name__"}},
+			Labels:     []Label{{Value: "vm_rows", Name: "__name__"}, {Value: "bar", Name: "foo"}},
 			Timestamps: []int64{1583786142},
 			Values:     []float64{13763},
 		},
 		{
-			Labels:     []Label{{Value: "vm_requests", Name: "__name__"}},
+			Labels:     []Label{{Value: "vm_requests", Name: "__name__"}, {Value: "baz", Name: "foo"}},
 			Timestamps: []int64{1583786140},
 			Values:     []float64{2000},
 		},
 	}
-	if !reflect.DeepEqual(m, expected) {
-		t.Fatalf("unexpected metric %+v want %+v", m, expected)
-	}
+	metricsEqual(t, m, expected)
 
 	m, req, err := pq.Query(ctx, query, ts) // 7 - scalar
 	if err != nil {
@@ -158,13 +157,36 @@ func TestVMInstantQuery(t *testing.T) {
 	if len(m) != 1 {
 		t.Fatalf("expected 1 metric got %d in %+v", len(m), m)
 	}
-	exp := Metric{
-		Labels:     []Label{{Value: "constantLine(10)", Name: "name"}},
-		Timestamps: []int64{1611758403},
-		Values:     []float64{10},
+	exp := []Metric{
+		{
+			Labels:     []Label{{Value: "constantLine(10)", Name: "name"}},
+			Timestamps: []int64{1611758403},
+			Values:     []float64{10},
+		},
 	}
-	if !reflect.DeepEqual(m[0], exp) {
-		t.Fatalf("unexpected metric %+v want %+v", m[0], expected)
-	}
+	metricsEqual(t, m, exp)
 }
 
+func metricsEqual(t *testing.T, gotM, expectedM []Metric) {
+	for i, exp := range expectedM {
+		got := gotM[i]
+		gotTS, expTS := got.Timestamps, exp.Timestamps
+		if !reflect.DeepEqual(gotTS, expTS) {
+			t.Fatalf("unexpected timestamps %+v want %+v", gotTS, expTS)
+		}
+		gotV, expV := got.Values, exp.Values
+		if !reflect.DeepEqual(gotV, expV) {
+			t.Fatalf("unexpected values %+v want %+v", gotV, expV)
+		}
+		sort.Slice(got.Labels, func(i, j int) bool {
+			return got.Labels[i].Name < got.Labels[j].Name
+		})
+		sort.Slice(exp.Labels, func(i, j int) bool {
+			return exp.Labels[i].Name < exp.Labels[j].Name
+		})
+		if !reflect.DeepEqual(exp.Labels, got.Labels) {
+			t.Fatalf("unexpected labels %+v want %+v", got.Labels, exp.Labels)
+		}
+	}
+}
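A note on the helper's design: the test no longer compares whole Metric values with reflect.DeepEqual because SetLabels fills m.Labels by ranging over a map, and Go randomizes map iteration order, so label order can differ between runs. metricsEqual therefore compares timestamps and values directly and sorts both label sets by name before comparing them. A minimal illustration of the underlying behaviour (not from the commit):

package main

import "fmt"

func main() {
	ls := map[string]string{"__name__": "vm_rows", "foo": "bar", "baz": "qux"}
	// Map iteration order is randomized per run, so the printed order may vary.
	for k := range ls {
		fmt.Print(k, " ")
	}
	fmt.Println()
}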