mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-11-21 14:44:00 +00:00
lib/logstorage: increase the maximum number of columns per block from 1000 to 2000
This will allow storing wide events with up to 2K fields per event into VictoriaLogs.
While at it, remove the misleading comment that columnsHeader is read in full per each matching block.
This is no longer the case after the improvements made at commit 202eb429a7.
Now only the needed columnHeader entries are read for the columns mentioned in the query.
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6425#issuecomment-2418337124
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4762
This commit is contained in:
parent
5e2cb78cce
commit
9ba6be4179
2 changed files with 3 additions and 4 deletions
|
@ -146,7 +146,7 @@ func (bsm *blockStreamMerger) mustWriteBlock(bd *blockData, bsw *blockStreamWrit
|
||||||
bsm.bd.copyFrom(&bsm.a, bd)
|
bsm.bd.copyFrom(&bsm.a, bd)
|
||||||
bsm.uniqueFields = uniqueFields
|
bsm.uniqueFields = uniqueFields
|
||||||
}
|
}
|
||||||
case bsm.uniqueFields+uniqueFields >= maxColumnsPerBlock:
|
case bsm.uniqueFields+uniqueFields > maxColumnsPerBlock:
|
||||||
// Cannot merge bd with bsm.rows, because too many columns will be created.
|
// Cannot merge bd with bsm.rows, because too many columns will be created.
|
||||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4762
|
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4762
|
||||||
//
|
//
|
||||||
|
|
|
@ -26,9 +26,8 @@ const maxRowsPerBlock = 8 * 1024 * 1024
|
||||||
// maxColumnsPerBlock is the maximum number of columns per block.
|
// maxColumnsPerBlock is the maximum number of columns per block.
|
||||||
//
|
//
|
||||||
// It isn't recommended setting this value to too big value, because this may result
|
// It isn't recommended setting this value to too big value, because this may result
|
||||||
// in excess memory usage during data ingestion and significant slowdown during query execution,
|
// in excess memory usage during data ingestion and significant slowdown during query execution.
|
||||||
// since every column header is unpacked in every matching block during query execution.
|
const maxColumnsPerBlock = 2_000
|
||||||
const maxColumnsPerBlock = 1_000
|
|
||||||
|
|
||||||
// MaxFieldNameSize is the maximum size in bytes for field name.
|
// MaxFieldNameSize is the maximum size in bytes for field name.
|
||||||
//
|
//
|
||||||
|
|
Loading…
Reference in a new issue