From 10476738a861746943eae04fc5785f50d77198b3 Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Tue, 8 Feb 2022 00:11:19 +0200
Subject: [PATCH] lib/blockcache: increase the lifetime for rarely accessed blocks from 2 minutes to 5 minutes

This should improve data ingestion speed if time series samples are ingested
with an interval bigger than 2 minutes. The actual interval can exceed
2 minutes in the case of slow inserts, even if the original interval between
samples doesn't exceed 2 minutes.

Slow inserts may appear in the following cases:

* A big number of new time series is pushed to VictoriaMetrics, so they
  cannot be registered within 2 minutes.
* The MetricName->tsid cache is reset on indexdb rotation or due to an
  unclean shutdown. In this case VictoriaMetrics needs to load
  MetricName->tsid entries for all the incoming series from IndexDB.
  IndexDB uses the block cache for increasing lookup performance.
  If the cache doesn't contain the needed block, then IndexDB reads and
  unpacks the block from disk. This requires extra disk read IO and CPU.
  See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1401

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2007

This should also increase performance for periodically executed queries
with intervals between 2 minutes and 5 minutes.
See the previous similar commit - 43103be011955d398a899fbea71e4116eb0d6412

It is possible that the timeout can be increased further. Let's collect
production numbers for this change so the timeout can be adjusted later
if needed.
---
 lib/blockcache/blockcache.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/lib/blockcache/blockcache.go b/lib/blockcache/blockcache.go
index 0b2c5e7483..e3cc45465b 100644
--- a/lib/blockcache/blockcache.go
+++ b/lib/blockcache/blockcache.go
@@ -92,7 +92,7 @@ func (c *Cache) updateSizeBytes(n int) {
 
 // cleaner periodically cleans least recently used entries in c.
 func (c *Cache) cleaner() {
-	ticker := time.NewTicker(30 * time.Second)
+	ticker := time.NewTicker(57 * time.Second)
 	defer ticker.Stop()
 	perKeyMissesTicker := time.NewTicker(2 * time.Minute)
 	defer perKeyMissesTicker.Stop()
@@ -113,9 +113,9 @@ func (c *Cache) cleanByTimeout() {
 	c.mu.Lock()
 	for _, pes := range c.m {
 		for offset, e := range pes {
-			// Delete items accessed more than two minutes ago.
+			// Delete items accessed more than five minutes ago.
 			// This time should be enough for repeated queries.
-			if currentTime-atomic.LoadUint64(&e.lastAccessTime) > 2*60 {
+			if currentTime-atomic.LoadUint64(&e.lastAccessTime) > 5*60 {
				c.updateSizeBytes(-e.block.SizeBytes())
				delete(pes, offset)
				// do not delete the entry from c.perKeyMisses, since it is removed by Cache.cleaner later.
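
Editor's note: below is a minimal standalone sketch of the time-based eviction
mechanism this patch tunes, for readers unfamiliar with the blockcache cleaner.
The names (ttlCache, entry, newTTLCache) are hypothetical and do not appear in
the VictoriaMetrics source; the real Cache keys blocks by part and offset and
additionally tracks per-key misses. The sketch only shows the pattern: a cleaner
goroutine woken by a ticker, per-entry last-access timestamps updated atomically,
and eviction of entries idle longer than the configured lifetime.

// Illustrative sketch only; not part of the patch above.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

type entry struct {
	lastAccessTime uint64 // unix seconds; updated atomically on every access
	value          []byte
}

type ttlCache struct {
	mu  sync.Mutex
	m   map[string]*entry
	ttl uint64 // seconds an entry may stay unused before eviction
}

func newTTLCache(ttl time.Duration) *ttlCache {
	return &ttlCache{
		m:   make(map[string]*entry),
		ttl: uint64(ttl.Seconds()),
	}
}

// GetEntry returns the cached value and refreshes its last access time.
func (c *ttlCache) GetEntry(key string) ([]byte, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	e, ok := c.m[key]
	if !ok {
		return nil, false
	}
	atomic.StoreUint64(&e.lastAccessTime, uint64(time.Now().Unix()))
	return e.value, true
}

// PutEntry stores value under key with a fresh access timestamp.
func (c *ttlCache) PutEntry(key string, value []byte) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m[key] = &entry{
		lastAccessTime: uint64(time.Now().Unix()),
		value:          value,
	}
}

// cleaner periodically drops entries that haven't been accessed for ttl seconds,
// mirroring the Cache.cleaner / Cache.cleanByTimeout pattern from the patch.
func (c *ttlCache) cleaner(stopCh <-chan struct{}) {
	ticker := time.NewTicker(57 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-stopCh:
			return
		case <-ticker.C:
			c.cleanByTimeout()
		}
	}
}

func (c *ttlCache) cleanByTimeout() {
	currentTime := uint64(time.Now().Unix())
	c.mu.Lock()
	defer c.mu.Unlock()
	for key, e := range c.m {
		// Delete items that haven't been accessed within the configured lifetime.
		if currentTime-atomic.LoadUint64(&e.lastAccessTime) > c.ttl {
			delete(c.m, key)
		}
	}
}

func main() {
	// With a 5-minute lifetime, entries survive access gaps of up to 5 minutes,
	// which is the behavior change this patch makes for rarely accessed blocks.
	c := newTTLCache(5 * time.Minute)
	stopCh := make(chan struct{})
	go c.cleaner(stopCh)

	c.PutEntry("indexdb-block-42", []byte("unpacked block data"))
	if v, ok := c.GetEntry("indexdb-block-42"); ok {
		fmt.Printf("got %d bytes from cache\n", len(v))
	}
	close(stopCh)
}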