From e8fdb27625a2985a23f11332683d3a99f1e53832 Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin <valyala@victoriametrics.com>
Date: Thu, 3 Mar 2022 14:38:03 +0200
Subject: [PATCH] lib/mergeset: move storageBlock from inmemoryPart to a
 sync.Pool

The lifetime of storageBlock is much shorter compared to the lifetime of
inmemoryPart, so sync.Pool usage should reduce overall memory usage and
improve performance because of better locality of reference when marshaling
inmemoryBlock to inmemoryPart.

https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2247
---
 lib/mergeset/encoding.go      | 15 +++++++++++++++
 lib/mergeset/inmemory_part.go | 14 +++++++-------
 2 files changed, 22 insertions(+), 7 deletions(-)

diff --git a/lib/mergeset/encoding.go b/lib/mergeset/encoding.go
index b7774d720b..7dd6dd3f92 100644
--- a/lib/mergeset/encoding.go
+++ b/lib/mergeset/encoding.go
@@ -148,6 +148,21 @@ func (sb *storageBlock) Reset() {
 	sb.lensData = sb.lensData[:0]
 }
 
+func getStorageBlock() *storageBlock {
+	v := storageBlockPool.Get()
+	if v == nil {
+		return &storageBlock{}
+	}
+	return v.(*storageBlock)
+}
+
+func putStorageBlock(sb *storageBlock) {
+	sb.Reset()
+	storageBlockPool.Put(sb)
+}
+
+var storageBlockPool sync.Pool
+
 type marshalType uint8
 
 const (
diff --git a/lib/mergeset/inmemory_part.go b/lib/mergeset/inmemory_part.go
index 9b556fd0e3..3816fee24b 100644
--- a/lib/mergeset/inmemory_part.go
+++ b/lib/mergeset/inmemory_part.go
@@ -10,7 +10,6 @@ import (
 
 type inmemoryPart struct {
 	ph partHeader
-	sb storageBlock
 	bh blockHeader
 	mr metaindexRow
 
@@ -28,7 +27,6 @@ type inmemoryPart struct {
 
 func (mp *inmemoryPart) Reset() {
 	mp.ph.Reset()
-	mp.sb.Reset()
 	mp.bh.Reset()
 	mp.mr.Reset()
 
@@ -47,25 +45,27 @@ func (mp *inmemoryPart) Reset() {
 // Init initializes mp from ib.
 func (mp *inmemoryPart) Init(ib *inmemoryBlock) {
 	mp.Reset()
+	sb := getStorageBlock()
+	defer putStorageBlock(sb)
 
 	// Use the minimum possible compressLevel for compressing inmemoryPart,
 	// since it will be merged into file part soon.
 	// See https://github.com/facebook/zstd/releases/tag/v1.3.4 for details about negative compression level
 	compressLevel := -5
-	mp.bh.firstItem, mp.bh.commonPrefix, mp.bh.itemsCount, mp.bh.marshalType = ib.MarshalUnsortedData(&mp.sb, mp.bh.firstItem[:0], mp.bh.commonPrefix[:0], compressLevel)
+	mp.bh.firstItem, mp.bh.commonPrefix, mp.bh.itemsCount, mp.bh.marshalType = ib.MarshalUnsortedData(sb, mp.bh.firstItem[:0], mp.bh.commonPrefix[:0], compressLevel)
 
 	mp.ph.itemsCount = uint64(len(ib.items))
 	mp.ph.blocksCount = 1
 	mp.ph.firstItem = append(mp.ph.firstItem[:0], ib.items[0].String(ib.data)...)
 	mp.ph.lastItem = append(mp.ph.lastItem[:0], ib.items[len(ib.items)-1].String(ib.data)...)
 
-	fs.MustWriteData(&mp.itemsData, mp.sb.itemsData)
+	fs.MustWriteData(&mp.itemsData, sb.itemsData)
 	mp.bh.itemsBlockOffset = 0
-	mp.bh.itemsBlockSize = uint32(len(mp.sb.itemsData))
+	mp.bh.itemsBlockSize = uint32(len(mp.itemsData.B))
 
-	fs.MustWriteData(&mp.lensData, mp.sb.lensData)
+	fs.MustWriteData(&mp.lensData, sb.lensData)
 	mp.bh.lensBlockOffset = 0
-	mp.bh.lensBlockSize = uint32(len(mp.sb.lensData))
+	mp.bh.lensBlockSize = uint32(len(mp.lensData.B))
 
 	mp.unpackedIndexBlockBuf = mp.bh.Marshal(mp.unpackedIndexBlockBuf[:0])
 	mp.packedIndexBlockBuf = encoding.CompressZSTDLevel(mp.packedIndexBlockBuf[:0], mp.unpackedIndexBlockBuf, 0)
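
Note (illustration only, not part of the patch): the helpers added in encoding.go follow Go's standard sync.Pool get/put idiom. The standalone sketch below shows that pattern in isolation; the storageBlock here is a simplified stand-in modeling only the itemsData and lensData fields visible in the diff, and main imitates how inmemoryPart.Init borrows a block for one marshaling pass and returns it when done.

package main

import (
	"fmt"
	"sync"
)

// Simplified stand-in for lib/mergeset's storageBlock. Only the two byte
// buffers visible in the diff are modeled; the real type may carry more state.
type storageBlock struct {
	itemsData []byte
	lensData  []byte
}

// Reset truncates the buffers while keeping their capacity, so a pooled
// block can be reused without reallocating.
func (sb *storageBlock) Reset() {
	sb.itemsData = sb.itemsData[:0]
	sb.lensData = sb.lensData[:0]
}

var storageBlockPool sync.Pool

// getStorageBlock returns a block from the pool, allocating a fresh one
// when the pool is empty (Get returns nil in that case).
func getStorageBlock() *storageBlock {
	v := storageBlockPool.Get()
	if v == nil {
		return &storageBlock{}
	}
	return v.(*storageBlock)
}

// putStorageBlock resets the block and hands it back to the pool so its
// backing slices can be reused by the next caller.
func putStorageBlock(sb *storageBlock) {
	sb.Reset()
	storageBlockPool.Put(sb)
}

func main() {
	// Borrow a block for the duration of one marshaling pass and return it
	// when done, mirroring inmemoryPart.Init after the patch.
	sb := getStorageBlock()
	defer putStorageBlock(sb)

	item := "example-item"
	sb.itemsData = append(sb.itemsData, item...)
	sb.lensData = append(sb.lensData, byte(len(item)))
	fmt.Printf("items: %d bytes, lens: %d bytes\n", len(sb.itemsData), len(sb.lensData))
}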