Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2025-01-20 15:16:42 +00:00)
Commit fbfd7415da
It allows modifying existing series values. The user must write the modified series to the vminsert API /insert/0/prometheus/api/v1/update/series. vminsert generates an ID and adds it to the series as the __generation_id label. Modified series are merged on the vmselect side; only the last modification request for a given time range is applied. Modification requests can be exported with the following API request:

`curl localhost:8481/select/0/prometheus/api/v1/export -g -d 'reduce_mem_usage=true' -d 'match[]={__generation_id!=""}'`

https://github.com/VictoriaMetrics/VictoriaMetrics/issues/844

- adds guide
- allow single datapoint modification
- vmselectapi: prevent MetricBlockRef corruption. Modifying the MetricName byte slice may result in MetricBlockRef corruption, since `ctx.mb.MetricName` is a pointer to `MetricBlockRef.MetricName`. Signed-off-by: hagen1778 <roman@victoriametrics.com>
- Revert "vmselectapi: prevent MetricBlockRef corruption". This reverts commit cf36bfa1895885fcc7dc2673248ee56c78180ea0.
- app/vmstorage/servers: properly copy MetricName into MetricBlock inside blockIterator.NextBlock. This should fix the issue at cf36bfa189 (cherry picked from commit 916f1ab86c)
- app/vmselect: correctly update single datapoint at merge
- app/vmselect: adds mutex for series update map. Previously this was a sync API, but the function signature was changed for performance optimizations.
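For illustration, a minimal Go sketch of what a client-side update request might look like. It assumes a vminsert instance listening on localhost:8480 and a request body in the same JSON line format as /api/v1/import, since the handler below parses the body with the vmimport parser; the metric name, labels, values, timestamps and address are made-up examples, not part of this change.

// Minimal client sketch for sending a series update request (assumptions noted above).
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// One JSON line per series, in the /api/v1/import line format.
	// Metric name, labels, values and timestamps are hypothetical;
	// timestamps are in milliseconds.
	body := []byte(`{"metric":{"__name__":"http_requests_total","job":"api"},"values":[0,0],"timestamps":[1700000000000,1700000060000]}` + "\n")

	// Assumed vminsert address and tenant "0"; adjust for your setup.
	resp, err := http.Post(
		"http://localhost:8480/insert/0/prometheus/api/v1/update/series",
		"application/json",
		bytes.NewReader(body),
	)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("update response status:", resp.Status)
}

The result can then be checked with the export query from the commit message above, which matches series carrying a non-empty __generation_id label.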
80 lines · 2.7 KiB · Go
package seriesupdate

import (
	"net/http"
	"strconv"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/netstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/vmimport"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
	"github.com/VictoriaMetrics/metrics"
)

var (
	rowsInserted       = metrics.NewCounter(`vm_rows_inserted_total{type="update_series"}`)
	rowsTenantInserted = tenantmetrics.NewCounterMap(`vm_tenant_inserted_rows_total{type="update_series"}`)
	rowsPerInsert      = metrics.NewHistogram(`vm_rows_per_insert{type="update_series"}`)
)

// generateUniqueGenerationID returns a locally unique generation ID
// derived from the current time in nanoseconds.
func generateUniqueGenerationID() []byte {
	nextID := time.Now().UnixNano()
	return []byte(strconv.FormatInt(nextID, 10))
}

// InsertHandler processes `/api/v1/update/series` request.
func InsertHandler(at *auth.Token, req *http.Request) error {
	isGzipped := req.Header.Get("Content-Encoding") == "gzip"
	return parser.ParseStream(req.Body, isGzipped, func(rows []parser.Row) error {
		return insertRows(at, rows)
	})
}

func insertRows(at *auth.Token, rows []parser.Row) error {
	ctx := netstorage.GetInsertCtx()
	defer netstorage.PutInsertCtx(ctx)

	ctx.Reset() // This line is required for initializing ctx internals.
	rowsTotal := 0
	generationID := generateUniqueGenerationID()
	for i := range rows {
		r := &rows[i]
		rowsTotal += len(r.Values)
		ctx.Labels = ctx.Labels[:0]
		for j := range r.Tags {
			tag := &r.Tags[j]
			ctx.AddLabelBytes(tag.Key, tag.Value)
		}
		if len(ctx.Labels) == 0 {
			// Skip metric without labels.
			continue
		}
		// There is no need for relabeling or extra_label addition here,
		// since the modified series already passed this phase during ingestion,
		// and applying it again may lead to unexpected results for the user.
		ctx.AddLabelBytes([]byte(`__generation_id`), generationID)
		ctx.MetricNameBuf = storage.MarshalMetricNameRaw(ctx.MetricNameBuf[:0], at.AccountID, at.ProjectID, ctx.Labels)
		values := r.Values
		timestamps := r.Timestamps
		if len(timestamps) != len(values) {
			logger.Panicf("BUG: len(timestamps)=%d must match len(values)=%d", len(timestamps), len(values))
		}
		atLocal := ctx.GetLocalAuthToken(at)
		storageNodeIdx := ctx.GetStorageNodeIdx(atLocal, ctx.Labels)
		for j, value := range values {
			timestamp := timestamps[j]
			if err := ctx.WriteDataPointExt(storageNodeIdx, ctx.MetricNameBuf, timestamp, value); err != nil {
				return err
			}
		}
	}
	rowsInserted.Add(rowsTotal)
	rowsTenantInserted.Get(at).Add(rowsTotal)
	rowsPerInsert.Update(float64(rowsTotal))
	return ctx.FlushBufs()
}