lib/mergeset: move storageBlock from inmemoryPart to a sync.Pool

The lifetime of storageBlock is much shorter compared to the lifetime of inmemoryPart,
so using sync.Pool should reduce overall memory usage and improve performance
because of better locality of reference when marshaling inmemoryBlock to inmemoryPart.

https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2247
This commit is contained in:
Aliaksandr Valialkin 2022-03-03 14:38:03 +02:00
parent b47f18f555
commit f4e466955d
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
2 changed files with 22 additions and 7 deletions

View file

@ -148,6 +148,21 @@ func (sb *storageBlock) Reset() {
sb.lensData = sb.lensData[:0]
}
// getStorageBlock obtains a storageBlock from storageBlockPool,
// falling back to a fresh allocation when the pool is empty.
// The caller must return the block via putStorageBlock when done.
func getStorageBlock() *storageBlock {
	if v := storageBlockPool.Get(); v != nil {
		return v.(*storageBlock)
	}
	return &storageBlock{}
}
// putStorageBlock resets sb and places it back into storageBlockPool
// so it can be reused by subsequent getStorageBlock calls.
func putStorageBlock(sb *storageBlock) {
	// Reset before pooling so the next user starts from a clean state.
	sb.Reset()
	storageBlockPool.Put(sb)
}
var storageBlockPool sync.Pool
type marshalType uint8
const (

View file

@ -10,7 +10,6 @@ import (
type inmemoryPart struct {
ph partHeader
sb storageBlock
bh blockHeader
mr metaindexRow
@ -28,7 +27,6 @@ type inmemoryPart struct {
func (mp *inmemoryPart) Reset() {
mp.ph.Reset()
mp.sb.Reset()
mp.bh.Reset()
mp.mr.Reset()
@ -47,25 +45,27 @@ func (mp *inmemoryPart) Reset() {
// Init initializes mp from ib.
func (mp *inmemoryPart) Init(ib *inmemoryBlock) {
mp.Reset()
sb := getStorageBlock()
defer putStorageBlock(sb)
// Use the minimum possible compressLevel for compressing inmemoryPart,
// since it will be merged into file part soon.
// See https://github.com/facebook/zstd/releases/tag/v1.3.4 for details about negative compression level
compressLevel := -5
mp.bh.firstItem, mp.bh.commonPrefix, mp.bh.itemsCount, mp.bh.marshalType = ib.MarshalUnsortedData(&mp.sb, mp.bh.firstItem[:0], mp.bh.commonPrefix[:0], compressLevel)
mp.bh.firstItem, mp.bh.commonPrefix, mp.bh.itemsCount, mp.bh.marshalType = ib.MarshalUnsortedData(sb, mp.bh.firstItem[:0], mp.bh.commonPrefix[:0], compressLevel)
mp.ph.itemsCount = uint64(len(ib.items))
mp.ph.blocksCount = 1
mp.ph.firstItem = append(mp.ph.firstItem[:0], ib.items[0].String(ib.data)...)
mp.ph.lastItem = append(mp.ph.lastItem[:0], ib.items[len(ib.items)-1].String(ib.data)...)
fs.MustWriteData(&mp.itemsData, mp.sb.itemsData)
fs.MustWriteData(&mp.itemsData, sb.itemsData)
mp.bh.itemsBlockOffset = 0
mp.bh.itemsBlockSize = uint32(len(mp.sb.itemsData))
mp.bh.itemsBlockSize = uint32(len(mp.itemsData.B))
fs.MustWriteData(&mp.lensData, mp.sb.lensData)
fs.MustWriteData(&mp.lensData, sb.lensData)
mp.bh.lensBlockOffset = 0
mp.bh.lensBlockSize = uint32(len(mp.sb.lensData))
mp.bh.lensBlockSize = uint32(len(mp.lensData.B))
mp.unpackedIndexBlockBuf = mp.bh.Marshal(mp.unpackedIndexBlockBuf[:0])
mp.packedIndexBlockBuf = encoding.CompressZSTDLevel(mp.packedIndexBlockBuf[:0], mp.unpackedIndexBlockBuf, 0)