diff --git a/app/vmagent/remotewrite/client.go b/app/vmagent/remotewrite/client.go
index da6a8960b5..d556f6cb34 100644
--- a/app/vmagent/remotewrite/client.go
+++ b/app/vmagent/remotewrite/client.go
@@ -345,13 +345,12 @@ again:
 	if statusCode == 409 || statusCode == 400 {
 		body, err := ioutil.ReadAll(resp.Body)
 		_ = resp.Body.Close()
-		l := logger.WithThrottler("remoteWriteRejected", 5*time.Second)
 		if err != nil {
-			l.Errorf("sending a block with size %d bytes to %q was rejected (skipping the block): status code %d; "+
+			remoteWriteRejectedLogger.Errorf("sending a block with size %d bytes to %q was rejected (skipping the block): status code %d; "+
 				"failed to read response body: %s", len(block), c.sanitizedURL, statusCode, err)
 		} else {
-			l.Errorf("sending a block with size %d bytes to %q was rejected (skipping the block): status code %d; response body: %s",
+			remoteWriteRejectedLogger.Errorf("sending a block with size %d bytes to %q was rejected (skipping the block): status code %d; response body: %s",
 				len(block), c.sanitizedURL, statusCode, string(body))
 		}
 		// Just drop block on 409 and 400 status codes like Prometheus does.
@@ -388,6 +387,8 @@ again:
 	goto again
 }
 
+var remoteWriteRejectedLogger = logger.WithThrottler("remoteWriteRejected", 5*time.Second)
+
 type rateLimiter struct {
 	perSecondLimit int64
 
diff --git a/lib/storage/partition.go b/lib/storage/partition.go
index ddd13fc48e..969211c83e 100644
--- a/lib/storage/partition.go
+++ b/lib/storage/partition.go
@@ -836,8 +836,7 @@ func (pt *partition) ForceMergeAllParts() error {
 	maxOutBytes := fs.MustGetFreeSpace(pt.bigPartsPath)
 	if newPartSize > maxOutBytes {
 		freeSpaceNeededBytes := newPartSize - maxOutBytes
-		logger.WithThrottler("forceMerge", time.Minute).Warnf("cannot initiate force merge for the partition %s; additional space needed: %d bytes",
-			pt.name, freeSpaceNeededBytes)
+		forceMergeLogger.Warnf("cannot initiate force merge for the partition %s; additional space needed: %d bytes", pt.name, freeSpaceNeededBytes)
 		return nil
 	}
 
@@ -848,6 +847,8 @@ func (pt *partition) ForceMergeAllParts() error {
 	return nil
 }
 
+var forceMergeLogger = logger.WithThrottler("forceMerge", time.Minute)
+
 func appendAllPartsToMerge(dst, src []*partWrapper) []*partWrapper {
 	for _, pw := range src {
 		if pw.isInMerge {
diff --git a/lib/storage/storage.go b/lib/storage/storage.go
index 26091a55e8..cde974619c 100644
--- a/lib/storage/storage.go
+++ b/lib/storage/storage.go
@@ -1861,7 +1861,7 @@ func (s *Storage) add(rows []rawRow, dstMrs []*MetricRow, mrs []MetricRow, preci
 		atomic.AddUint64(&s.slowRowInserts, slowInsertsCount)
 	}
 	if firstWarn != nil {
-		logger.WithThrottler("storageAddRows", 5*time.Second).Warnf("warn occurred during rows addition: %s", firstWarn)
+		storageAddRowsLogger.Warnf("warn occurred during rows addition: %s", firstWarn)
 	}
 	dstMrs = dstMrs[:j]
 	rows = rows[:j]
@@ -1881,6 +1881,8 @@ func (s *Storage) add(rows []rawRow, dstMrs []*MetricRow, mrs []MetricRow, preci
 	return nil
 }
 
+var storageAddRowsLogger = logger.WithThrottler("storageAddRows", 5*time.Second)
+
 func (s *Storage) registerSeriesCardinality(metricID uint64, metricNameRaw []byte) error {
 	if sl := s.hourlySeriesLimiter; sl != nil && !sl.Add(metricID) {
 		atomic.AddUint64(&s.hourlySeriesLimitRowsDropped, 1)
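
The diff above replaces per-call logger.WithThrottler(...) constructions with package-level variables, so the throttled logger is set up once at package initialization instead of inside each (potentially hot) logging branch. Below is a minimal sketch of the same pattern, assuming a standalone package: the slowInsertLogger variable and reportSlowInsert function are hypothetical names invented for illustration, while the logger.WithThrottler/Warnf calls are the ones used in the diff itself.

// Minimal sketch (hypothetical package): hoist the throttled logger into a
// package-level variable so logger.WithThrottler is evaluated once, not on
// every call from a hot path.
package example

import (
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)

// Created once; repeated messages within the 5-second window are suppressed
// by the throttler instead of flooding the log.
var slowInsertLogger = logger.WithThrottler("slowInsert", 5*time.Second)

// reportSlowInsert stands in for a hot code path that may emit the same
// warning many times per second.
func reportSlowInsert(count int) {
	slowInsertLogger.Warnf("detected %d slow inserts", count)
}

This mirrors the structure of the change in each file touched by the diff: the WithThrottler call moves out of the logging branch and into a var declaration next to the code that uses it.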