lib/blockcache: split the cache into multiple shards
This should reduce contention on the cache mutex on hosts with many CPU cores, which, in turn, should increase overall cache throughput. This should help https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2007
Parent: a0a56d6c1c
Commit: fae3040868
1 changed file with 114 additions and 28 deletions
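The pattern behind the change: instead of one cache guarded by a single mutex, keep one independent cache per available CPU core and route each key to a shard by hashing it, so concurrent callers mostly take different locks. The sketch below is not code from this commit; it is a minimal, self-contained illustration of that pattern with an invented shardedMap type, and it uses runtime.NumCPU() where the commit uses cgroup.AvailableCPUs().

// Illustrative only: a sharded, mutex-protected map following the same
// pattern as the commit (one shard per CPU, shard picked by hashing the key).
// shardedMap and its methods are made up for this example.
package main

import (
	"fmt"
	"runtime"
	"sync"
)

type shard struct {
	mu sync.RWMutex
	m  map[uint64]string
}

type shardedMap struct {
	shards []*shard
}

func newShardedMap() *shardedMap {
	n := runtime.NumCPU() // the commit uses cgroup.AvailableCPUs() instead
	shards := make([]*shard, n)
	for i := range shards {
		shards[i] = &shard{m: make(map[uint64]string)}
	}
	return &shardedMap{shards: shards}
}

// fastHashUint64 is the same xorshift-multiply hash the diff adds.
func fastHashUint64(x uint64) uint64 {
	x ^= x >> 12 // a
	x ^= x << 25 // b
	x ^= x >> 27 // c
	return x * 2685821657736338717
}

// shardFor routes a key to a shard; keys spread roughly evenly, so
// concurrent callers mostly contend on different mutexes.
func (sm *shardedMap) shardFor(k uint64) *shard {
	return sm.shards[fastHashUint64(k)%uint64(len(sm.shards))]
}

func (sm *shardedMap) Put(k uint64, v string) {
	s := sm.shardFor(k)
	s.mu.Lock()
	s.m[k] = v
	s.mu.Unlock()
}

func (sm *shardedMap) Get(k uint64) (string, bool) {
	s := sm.shardFor(k)
	s.mu.RLock()
	v, ok := s.m[k]
	s.mu.RUnlock()
	return v, ok
}

func main() {
	sm := newShardedMap()
	sm.Put(42, "block")
	v, ok := sm.Get(42)
	fmt.Println(v, ok) // prints: block true
}

Goroutines whose keys hash to different shards no longer serialize on a single mutex, which is where the throughput gain described above is expected to come from.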
@@ -5,6 +5,7 @@ import (
 	"sync/atomic"
 	"time"
 
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
 )
 
@@ -12,6 +13,103 @@ import (
 //
 // Call NewCache() for creating new Cache.
 type Cache struct {
+	shards []*cache
+}
+
+// NewCache creates new cache.
+//
+// Cache size in bytes is limited by the value returned by getMaxSizeBytes() callback.
+func NewCache(getMaxSizeBytes func() int) *Cache {
+	shardsCount := cgroup.AvailableCPUs()
+	shards := make([]*cache, shardsCount)
+	getMaxShardBytes := func() int {
+		n := getMaxSizeBytes()
+		return n / shardsCount
+	}
+	for i := range shards {
+		shards[i] = newCache(getMaxShardBytes)
+	}
+	return &Cache{
+		shards: shards,
+	}
+}
+
+// RemoveBlocksForPart removes all the blocks for the given part from the cache.
+func (c *Cache) RemoveBlocksForPart(p interface{}) {
+	for _, shard := range c.shards {
+		shard.RemoveBlocksForPart(p)
+	}
+}
+
+// GetBlock returns a block for the given key k from c.
+func (c *Cache) GetBlock(k Key) Block {
+	h := fastHashUint64(k.Offset)
+	idx := h % uint64(len(c.shards))
+	shard := c.shards[idx]
+	return shard.GetBlock(k)
+}
+
+// PutBlock puts the given block b under the given key k into c.
+func (c *Cache) PutBlock(k Key, b Block) {
+	h := fastHashUint64(k.Offset)
+	idx := h % uint64(len(c.shards))
+	shard := c.shards[idx]
+	shard.PutBlock(k, b)
+}
+
+// Len returns the number of blocks in the cache c.
+func (c *Cache) Len() int {
+	n := 0
+	for _, shard := range c.shards {
+		n += shard.Len()
+	}
+	return n
+}
+
+// SizeBytes returns an approximate size in bytes of all the blocks stored in the cache c.
+func (c *Cache) SizeBytes() int {
+	n := 0
+	for _, shard := range c.shards {
+		n += shard.SizeBytes()
+	}
+	return n
+}
+
+// SizeMaxBytes returns the max allowed size in bytes for c.
+func (c *Cache) SizeMaxBytes() int {
+	n := 0
+	for _, shard := range c.shards {
+		n += shard.SizeMaxBytes()
+	}
+	return n
+}
+
+// Requests returns the number of requests served by c.
+func (c *Cache) Requests() uint64 {
+	n := uint64(0)
+	for _, shard := range c.shards {
+		n += shard.Requests()
+	}
+	return n
+}
+
+// Misses returns the number of cache misses for c.
+func (c *Cache) Misses() uint64 {
+	n := uint64(0)
+	for _, shard := range c.shards {
+		n += shard.Misses()
+	}
+	return n
+}
+
+func fastHashUint64(x uint64) uint64 {
+	x ^= x >> 12 // a
+	x ^= x << 25 // b
+	x ^= x >> 27 // c
+	return x * 2685821657736338717
+}
+
+type cache struct {
 	// Atomically updated fields must go first in the struct, so they are properly
 	// aligned to 8 bytes on 32-bit architectures.
 	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/212
@@ -61,11 +159,8 @@ type cacheEntry struct {
 	block Block
 }
 
-// NewCache creates new cache.
-//
-// Cache size in bytes is limited by the value returned by getMaxSizeBytes() callback.
-func NewCache(getMaxSizeBytes func() int) *Cache {
-	var c Cache
+func newCache(getMaxSizeBytes func() int) *cache {
+	var c cache
 	c.getMaxSizeBytes = getMaxSizeBytes
 	c.m = make(map[interface{}]map[uint64]*cacheEntry)
 	c.perKeyMisses = make(map[Key]int)
@@ -73,25 +168,23 @@ func NewCache(getMaxSizeBytes func() int) *Cache {
 	return &c
 }
 
-// RemoveBlocksForPart removes all the blocks for the given part from the cache.
-func (c *Cache) RemoveBlocksForPart(p interface{}) {
+func (c *cache) RemoveBlocksForPart(p interface{}) {
 	c.mu.Lock()
 	sizeBytes := 0
 	for _, e := range c.m[p] {
 		sizeBytes += e.block.SizeBytes()
-		// do not delete the entry from c.perKeyMisses, since it is removed by Cache.cleaner later.
+		// do not delete the entry from c.perKeyMisses, since it is removed by cache.cleaner later.
 	}
 	c.updateSizeBytes(-sizeBytes)
 	delete(c.m, p)
 	c.mu.Unlock()
 }
 
-func (c *Cache) updateSizeBytes(n int) {
+func (c *cache) updateSizeBytes(n int) {
 	atomic.AddInt64(&c.sizeBytes, int64(n))
 }
 
-// cleaner periodically cleans least recently used entries in c.
-func (c *Cache) cleaner() {
+func (c *cache) cleaner() {
 	ticker := time.NewTicker(57 * time.Second)
 	defer ticker.Stop()
 	perKeyMissesTicker := time.NewTicker(2 * time.Minute)
@@ -108,7 +201,7 @@ func (c *Cache) cleaner() {
 	}
 }
 
-func (c *Cache) cleanByTimeout() {
+func (c *cache) cleanByTimeout() {
 	// Delete items accessed more than five minutes ago.
 	// This time should be enough for repeated queries.
 	lastAccessTime := fasttime.UnixTimestamp() - 5*60
@@ -120,14 +213,13 @@ func (c *Cache) cleanByTimeout() {
 			if lastAccessTime > atomic.LoadUint64(&e.lastAccessTime) {
 				c.updateSizeBytes(-e.block.SizeBytes())
 				delete(pes, offset)
-				// do not delete the entry from c.perKeyMisses, since it is removed by Cache.cleaner later.
+				// do not delete the entry from c.perKeyMisses, since it is removed by cache.cleaner later.
 			}
 		}
 	}
 }
 
-// GetBlock returns a block for the given key k from c.
-func (c *Cache) GetBlock(k Key) Block {
+func (c *cache) GetBlock(k Key) Block {
 	atomic.AddUint64(&c.requests, 1)
 	var e *cacheEntry
 	c.mu.RLock()
@@ -152,8 +244,7 @@ func (c *Cache) GetBlock(k Key) Block {
 	return nil
 }
 
-// PutBlock puts the given block b under the given key k into c.
-func (c *Cache) PutBlock(k Key, b Block) {
+func (c *cache) PutBlock(k Key, b Block) {
 	c.mu.RLock()
 	// If the entry wasn't accessed yet (e.g. c.perKeyMisses[k] == 0), then cache it, since it is likely it will be accessed soon.
 	// Do not cache the entry only if there was only a single unsuccessful attempt to access it.
@@ -191,7 +282,7 @@ func (c *Cache) PutBlock(k Key, b Block) {
 			for offset, e := range pes {
 				c.updateSizeBytes(-e.block.SizeBytes())
 				delete(pes, offset)
-				// do not delete the entry from c.perKeyMisses, since it is removed by Cache.cleaner later.
+				// do not delete the entry from c.perKeyMisses, since it is removed by cache.cleaner later.
 				if c.SizeBytes() < maxSizeBytes {
 					return
 				}
@@ -200,8 +291,7 @@ func (c *Cache) PutBlock(k Key, b Block) {
 	}
 }
 
-// Len returns the number of blocks in the cache c.
-func (c *Cache) Len() int {
+func (c *cache) Len() int {
 	c.mu.RLock()
 	n := 0
 	for _, m := range c.m {
@@ -211,22 +301,18 @@ func (c *Cache) Len() int {
 	return n
 }
 
-// SizeBytes returns an approximate size in bytes of all the blocks stored in the cache c.
-func (c *Cache) SizeBytes() int {
+func (c *cache) SizeBytes() int {
 	return int(atomic.LoadInt64(&c.sizeBytes))
 }
 
-// SizeMaxBytes returns the max allowed size in bytes for c.
-func (c *Cache) SizeMaxBytes() int {
+func (c *cache) SizeMaxBytes() int {
 	return c.getMaxSizeBytes()
 }
 
-// Requests returns the number of requests served by c.
-func (c *Cache) Requests() uint64 {
+func (c *cache) Requests() uint64 {
 	return atomic.LoadUint64(&c.requests)
 }
 
-// Misses returns the number of cache misses for c.
-func (c *Cache) Misses() uint64 {
+func (c *cache) Misses() uint64 {
 	return atomic.LoadUint64(&c.misses)
 }
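For reference, here is a usage sketch of the Cache API as it stands after this change. Only NewCache, GetBlock, PutBlock, RemoveBlocksForPart and the stats methods appear in this diff; the Key fields (Part, Offset) and the Block interface with a SizeBytes() int method are assumptions inferred from how the diff uses them, and dataBlock is a made-up Block implementation.

// Usage sketch under assumed type shapes; not part of the commit.
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/blockcache"
)

// dataBlock is a hypothetical Block implementation for illustration only.
type dataBlock struct {
	payload []byte
}

// SizeBytes reports the approximate in-memory size of the block.
func (b *dataBlock) SizeBytes() int { return len(b.payload) }

func main() {
	// The size callback is re-evaluated dynamically; after this commit each
	// shard is capped at an equal fraction of the returned value.
	c := blockcache.NewCache(func() int { return 64 * 1024 * 1024 })

	part := &struct{ name string }{name: "part-1"} // any comparable per-part value
	k := blockcache.Key{Part: part, Offset: 4096}  // assumed Key fields

	c.PutBlock(k, &dataBlock{payload: make([]byte, 8192)})
	if b := c.GetBlock(k); b != nil {
		fmt.Println("hit, size:", b.SizeBytes())
	}

	// Removing a part walks every shard and drops its blocks.
	c.RemoveBlocksForPart(part)
	fmt.Println("len:", c.Len(), "bytes:", c.SizeBytes(),
		"requests:", c.Requests(), "misses:", c.Misses())
}

Note that, per the comments in PutBlock, an entry that has seen exactly one recent miss is deliberately not cached, so callers should not assume every PutBlock results in a cached block.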