lib/{mergeset,storage}: properly update lastAccessTime in index and data block cache entries

Commit: 62b041e90a
Parent: b297fec515

2 changed files with 11 additions and 35 deletions
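The core of the fix is the same in both files: the caches switch from maps of entry values to maps of entry pointers. With a value-typed map, `ibe, ok := ibc.m[k]` in Get returns a copy of the cached entry, so the subsequent atomic store into ibe.lastAccessTime only touched that copy and the timestamp held inside the cache never changed. With pointer entries, Get updates the same entry object the map owns. A minimal, self-contained Go sketch of the difference (cacheEntry and the two maps below are illustrative names, not the VictoriaMetrics types):

package main

import (
	"fmt"
	"sync/atomic"
)

type cacheEntry struct {
	lastAccessTime uint64
	ib             string // stand-in for the cached block
}

func main() {
	// Map of struct values: the lookup copies the entry, so the atomic store
	// below only updates the copy; the entry inside the map keeps the old
	// timestamp. This is the behavior the commit fixes.
	byValue := map[string]cacheEntry{"k": {lastAccessTime: 1, ib: "block"}}
	e := byValue["k"]
	atomic.StoreUint64(&e.lastAccessTime, 100)
	fmt.Println(byValue["k"].lastAccessTime) // 1: the cached entry was not touched

	// Map of pointers: the lookup returns the same entry the map holds, so the
	// update is visible to later lookups and to the background cleaner.
	byPointer := map[string]*cacheEntry{"k": {lastAccessTime: 1, ib: "block"}}
	p := byPointer["k"]
	atomic.StoreUint64(&p.lastAccessTime, 100)
	fmt.Println(byPointer["k"].lastAccessTime) // 100
}

Note that Go does not allow taking the address of a map element directly (&byValue["k"].lastAccessTime does not compile), which is why the old code had to operate on a copy in the first place.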
lib/mergeset (inmemoryBlockCache):

@@ -315,7 +315,7 @@ type inmemoryBlockCache struct {
 	requests uint64
 	misses   uint64
 
-	m  map[inmemoryBlockCacheKey]inmemoryBlockCacheEntry
+	m  map[inmemoryBlockCacheKey]*inmemoryBlockCacheEntry
 	mu sync.RWMutex
 
 	cleanerStopCh chan struct{}
@@ -346,7 +346,7 @@ type inmemoryBlockCacheEntry struct {
 
 func newInmemoryBlockCache() *inmemoryBlockCache {
 	var ibc inmemoryBlockCache
-	ibc.m = make(map[inmemoryBlockCacheKey]inmemoryBlockCacheEntry)
+	ibc.m = make(map[inmemoryBlockCacheKey]*inmemoryBlockCacheEntry)
 
 	ibc.cleanerStopCh = make(chan struct{})
 	ibc.cleanerWG.Add(1)
@@ -406,10 +406,10 @@ func (ibc *inmemoryBlockCache) Get(k inmemoryBlockCacheKey) *inmemoryBlock {
 	atomic.AddUint64(&ibc.requests, 1)
 
 	ibc.mu.RLock()
-	ibe, ok := ibc.m[k]
+	ibe := ibc.m[k]
 	ibc.mu.RUnlock()
 
-	if ok {
+	if ibe != nil {
 		currentTime := atomic.LoadUint64(&currentTimestamp)
 		if atomic.LoadUint64(&ibe.lastAccessTime) != currentTime {
 			atomic.StoreUint64(&ibe.lastAccessTime, currentTime)
@@ -442,7 +442,7 @@ func (ibc *inmemoryBlockCache) Put(k inmemoryBlockCacheKey, ib *inmemoryBlock) b
 	}
 
 	// Store ib in the cache.
-	ibe := inmemoryBlockCacheEntry{
+	ibe := &inmemoryBlockCacheEntry{
 		lastAccessTime: atomic.LoadUint64(&currentTimestamp),
 		ib:             ib,
 	}
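Both cache structs carry cleanerStopCh and cleanerWG fields, which point to a background goroutine that evicts entries by lastAccessTime. That cleaner is not part of this diff, so the sketch below is only an illustration under that assumption: if Get never manages to refresh lastAccessTime inside the map, every entry eventually looks idle and even hot blocks can be dropped. All names here (blockCache, entry, cleanerLoop) and the one-minute ticker / maxIdle threshold are hypothetical.

package main

import (
	"sync"
	"sync/atomic"
	"time"
)

type entry struct {
	lastAccessTime uint64 // unix seconds; refreshed atomically by Get
	ib             []byte
}

type blockCache struct {
	mu sync.RWMutex
	m  map[uint64]*entry

	cleanerStopCh chan struct{}
	cleanerWG     sync.WaitGroup
}

// cleanerLoop periodically drops entries that have not been read for longer
// than maxIdle. It relies on Get updating lastAccessTime through the pointer
// stored in the map; with value-typed entries those updates would be lost.
func (bc *blockCache) cleanerLoop(maxIdle time.Duration) {
	defer bc.cleanerWG.Done()
	t := time.NewTicker(time.Minute)
	defer t.Stop()
	for {
		select {
		case <-bc.cleanerStopCh:
			return
		case <-t.C:
			now := uint64(time.Now().Unix())
			bc.mu.Lock()
			for k, e := range bc.m {
				if now-atomic.LoadUint64(&e.lastAccessTime) > uint64(maxIdle.Seconds()) {
					delete(bc.m, k)
				}
			}
			bc.mu.Unlock()
		}
	}
}

func main() {
	bc := &blockCache{
		m:             map[uint64]*entry{1: {lastAccessTime: uint64(time.Now().Unix())}},
		cleanerStopCh: make(chan struct{}),
	}
	bc.cleanerWG.Add(1)
	go bc.cleanerLoop(2 * time.Minute)

	// ... serve Get/Put traffic ...

	close(bc.cleanerStopCh) // stop the cleaner on shutdown
	bc.cleanerWG.Wait()
}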
lib/storage (indexBlockCache):

@@ -173,8 +173,7 @@ type indexBlockCache struct {
 	requests uint64
 	misses   uint64
 
-	m         map[uint64]indexBlockCacheEntry
-	missesMap map[uint64]uint64
+	m  map[uint64]*indexBlockCacheEntry
 	mu sync.RWMutex
 
 	cleanerStopCh chan struct{}
@@ -192,8 +191,7 @@ type indexBlockCacheEntry struct {
 
 func newIndexBlockCache() *indexBlockCache {
 	var ibc indexBlockCache
-	ibc.m = make(map[uint64]indexBlockCacheEntry)
-	ibc.missesMap = make(map[uint64]uint64)
+	ibc.m = make(map[uint64]*indexBlockCacheEntry)
 
 	ibc.cleanerStopCh = make(chan struct{})
 	ibc.cleanerWG.Add(1)
@@ -261,10 +259,10 @@ func (ibc *indexBlockCache) Get(k uint64) *indexBlock {
 	atomic.AddUint64(&ibc.requests, 1)
 
 	ibc.mu.RLock()
-	ibe, ok := ibc.m[k]
+	ibe := ibc.m[k]
 	ibc.mu.RUnlock()
 
-	if ok {
+	if ibe != nil {
 		currentTime := atomic.LoadUint64(&currentTimestamp)
 		if atomic.LoadUint64(&ibe.lastAccessTime) != currentTime {
 			atomic.StoreUint64(&ibe.lastAccessTime, currentTime)
@@ -272,22 +270,12 @@ func (ibc *indexBlockCache) Get(k uint64) *indexBlock {
 		return ibe.ib
 	}
 	atomic.AddUint64(&ibc.misses, 1)
-	ibc.mu.Lock()
-	ibc.missesMap[k]++
-	ibc.mu.Unlock()
 	return nil
 }
 
 func (ibc *indexBlockCache) Put(k uint64, ib *indexBlock) bool {
 	ibc.mu.Lock()
 
-	if ibc.missesMap[k] < 2 {
-		// Do not store infrequently accessed ib in the cache,
-		// so it don't evict frequently accessed items.
-		ibc.mu.Unlock()
-		return false
-	}
-
 	// Clean superflouos cache entries.
 	if overflow := len(ibc.m) - getMaxCachedIndexBlocksPerPart(); overflow > 0 {
 		// Remove 10% of items from the cache.
@@ -301,21 +289,9 @@ func (ibc *indexBlockCache) Put(k uint64, ib *indexBlock) bool {
 			}
 		}
 	}
-	if overflow := len(ibc.missesMap) - 8*getMaxCachedIndexBlocksPerPart(); overflow > 0 {
-		// Remove 10% of items from the cache.
-		overflow = int(float64(len(ibc.missesMap)) * 0.1)
-		for k := range ibc.missesMap {
-			delete(ibc.missesMap, k)
-			overflow--
-			if overflow == 0 {
-				break
-			}
-		}
-	}
 
 	// Store frequently requested ib in the cache.
-	delete(ibc.missesMap, k)
-	ibe := indexBlockCacheEntry{
+	ibe := &indexBlockCacheEntry{
 		lastAccessTime: atomic.LoadUint64(&currentTimestamp),
 		ib:             ib,
 	}
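The lib/storage side also drops the missesMap admission policy: previously an index block was cached only after it had missed at least twice, which cost a write lock on every cache miss in Get and a separate overflow trim for missesMap in Put. After this commit Put stores every block, and the cache size is bounded solely by the existing "remove ~10% on overflow" logic. A distilled sketch of the resulting Put path, reusing the hypothetical blockCache and entry types from the sketch above (maxEntries stands in for getMaxCachedIndexBlocksPerPart()):

func (bc *blockCache) put(k uint64, ib []byte) {
	const maxEntries = 1024 // stand-in for getMaxCachedIndexBlocksPerPart()

	bc.mu.Lock()
	defer bc.mu.Unlock()

	// Clean superfluous cache entries: when the map outgrows the limit,
	// drop roughly 10% of the entries (map iteration order is random,
	// so this removes an arbitrary subset).
	if overflow := len(bc.m) - maxEntries; overflow > 0 {
		overflow = int(float64(len(bc.m)) * 0.1)
		for key := range bc.m {
			delete(bc.m, key)
			overflow--
			if overflow == 0 {
				break
			}
		}
	}

	// Store the block unconditionally; no miss counting any more.
	bc.m[k] = &entry{
		lastAccessTime: uint64(time.Now().Unix()),
		ib:             ib,
	}
}

The trade-off is that one-off reads can now enter the cache immediately, relying on the overflow trim and on lastAccessTime-based cleanup (which actually works after this fix) to keep it bounded.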