diff --git a/lib/logstorage/block_stream_merger.go b/lib/logstorage/block_stream_merger.go
index 7e57dfa66..49e736854 100644
--- a/lib/logstorage/block_stream_merger.go
+++ b/lib/logstorage/block_stream_merger.go
@@ -146,7 +146,7 @@ func (bsm *blockStreamMerger) mustWriteBlock(bd *blockData, bsw *blockStreamWrit
 			bsm.bd.copyFrom(&bsm.a, bd)
 			bsm.uniqueFields = uniqueFields
 		}
-	case bsm.uniqueFields+uniqueFields >= maxColumnsPerBlock:
+	case bsm.uniqueFields+uniqueFields > maxColumnsPerBlock:
 		// Cannot merge bd with bsm.rows, because too many columns will be created.
 		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4762
 		//
diff --git a/lib/logstorage/consts.go b/lib/logstorage/consts.go
index d1de8faea..3bd8da81d 100644
--- a/lib/logstorage/consts.go
+++ b/lib/logstorage/consts.go
@@ -26,9 +26,8 @@ const maxRowsPerBlock = 8 * 1024 * 1024
 // maxColumnsPerBlock is the maximum number of columns per block.
 //
 // It isn't recommended setting this value to too big value, because this may result
-// in excess memory usage during data ingestion and significant slowdown during query execution,
-// since every column header is unpacked in every matching block during query execution.
-const maxColumnsPerBlock = 1_000
+// in excess memory usage during data ingestion and significant slowdown during query execution.
+const maxColumnsPerBlock = 2_000
 
 // MaxFieldNameSize is the maximum size in bytes for field name.
 //