lib/storage: properly update vm_slow_row_inserts_total metric when importing multiple data points per time series at once

Previously the `vm_slow_row_inserts_total` metric could be incremented multiple times for different data points belonging to a single time series,
while only a single increment is needed when inserting the first data point for that time series.
Aliaksandr Valialkin 2020-07-30 16:14:51 +03:00
parent bafd475f2c
commit 96039dcb40

@@ -1204,7 +1204,6 @@ func (s *Storage) add(rows []rawRow, mrs []MetricRow, precisionBits uint8) ([]ra
 		}
 	}
 	if pmrs != nil {
-		atomic.AddUint64(&s.slowRowInserts, uint64(len(pmrs.pmrs)))
 		// Sort pendingMetricRows by canonical metric name in order to speed up search via `is` in the loop below.
 		pendingMetricRows := pmrs.pmrs
 		sort.Slice(pendingMetricRows, func(i, j int) bool {
@@ -1212,6 +1211,7 @@ func (s *Storage) add(rows []rawRow, mrs []MetricRow, precisionBits uint8) ([]ra
 		})
 		is := idb.getIndexSearch(0, 0, noDeadline)
 		prevMetricNameRaw = nil
+		var slowInsertsCount uint64
 		for i := range pendingMetricRows {
 			pmr := &pendingMetricRows[i]
 			mr := &pmr.mr
@@ -1235,6 +1235,7 @@ func (s *Storage) add(rows []rawRow, mrs []MetricRow, precisionBits uint8) ([]ra
 				prevMetricNameRaw = mr.MetricNameRaw
 				continue
 			}
+			slowInsertsCount++
 			if err := is.GetOrCreateTSIDByName(&r.TSID, pmr.MetricName); err != nil {
 				// Do not stop adding rows on error - just skip invalid row.
 				// This guarantees that invalid rows don't prevent
@@ -1249,6 +1250,7 @@ func (s *Storage) add(rows []rawRow, mrs []MetricRow, precisionBits uint8) ([]ra
 		}
 		idb.putIndexSearch(is)
 		putPendingMetricRows(pmrs)
+		atomic.AddUint64(&s.slowRowInserts, slowInsertsCount)
 	}
 	if firstWarn != nil {
 		logger.Errorf("warn occurred during rows addition: %s", firstWarn)