package csvimport

import (
	"net/http"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/netstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/csvimport"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
	"github.com/VictoriaMetrics/metrics"
)

var (
	rowsInserted  = tenantmetrics.NewCounterMap(`vm_rows_inserted_total{type="csvimport"}`)
	rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="csvimport"}`)
)

// InsertHandler processes /api/v1/import/csv requests.
func InsertHandler(at *auth.Token, req *http.Request) error {
	return writeconcurrencylimiter.Do(func() error {
		return parser.ParseStream(req, func(rows []parser.Row) error {
			return insertRows(at, rows)
		})
	})
}

// insertRows converts the parsed CSV rows into labeled data points for the
// given tenant and pushes them to the storage layer via the insert context.
func insertRows(at *auth.Token, rows []parser.Row) error {
	ctx := netstorage.GetInsertCtx()
	defer netstorage.PutInsertCtx(ctx)

	ctx.Reset() // This line is required for initializing ctx internals.
	for i := range rows {
		r := &rows[i]
		ctx.Labels = ctx.Labels[:0]
		// The empty label name denotes the metric name itself.
		ctx.AddLabel("", r.Metric)
		for j := range r.Tags {
			tag := &r.Tags[j]
			ctx.AddLabel(tag.Key, tag.Value)
		}
		if err := ctx.WriteDataPoint(at, ctx.Labels, r.Timestamp, r.Value); err != nil {
			return err
		}
	}
	rowsInserted.Get(at).Add(len(rows))
	rowsPerInsert.Update(float64(len(rows)))
	return ctx.FlushBufs()
}
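
// The sketch below is illustrative only and not part of the upstream handler:
// it shows one way InsertHandler could be wired into a plain net/http endpoint.
// The "tenant" query arg, the function name, and the response codes are
// assumptions for the example; vminsert's real routing derives the auth.Token
// from the request path instead.
func exampleCSVImportEndpoint(w http.ResponseWriter, r *http.Request) {
	// Hypothetical: read an "accountID:projectID" tenant string from a query arg
	// and parse it with auth.NewToken from lib/auth.
	at, err := auth.NewToken(r.URL.Query().Get("tenant"))
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Delegate the CSV payload to InsertHandler defined above.
	if err := InsertHandler(at, r); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}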