mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git
lib/storage: try generating initial parts from inmemory rows with identical sizes under high ingestion rate
This should slightly improve the background merge rate under high load.
parent 4d71023eb9
commit edf3b7be47

1 changed file with 16 additions and 6 deletions
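
In the hunk below, the slow path no longer appends both the buffered rrs.rows and the incoming rows to rowsToFlush as one variable-sized batch. Instead it tops the buffer up to full capacity, flushes that fixed-size batch, and keeps the leftover rows in a fresh buffer, falling back to a direct flush only when even the leftovers exceed one shard's capacity (a sketch of this flow follows the diff).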
|
lib/storage/partition.go
@@ -479,14 +479,24 @@ func (rrs *rawRowsShard) addRows(pt *partition, rows []rawRow) {
 	maxRowsCount := cap(rrs.rows)
 	capacity := maxRowsCount - len(rrs.rows)
 	if capacity >= len(rows) {
-		// Fast path - rows fit capacity.
+		// Fast path - rows fit rrs.rows capacity.
 		rrs.rows = append(rrs.rows, rows...)
 	} else {
-		// Slow path - rows don't fit capacity.
-		// Put rrs.rows and rows to rowsToFlush and convert it to a part.
-		rowsToFlush = append(rowsToFlush, rrs.rows...)
-		rowsToFlush = append(rowsToFlush, rows...)
-		rrs.rows = rrs.rows[:0]
+		// Slow path - rows don't fit rrs.rows capacity.
+		// Fill rrs.rows with rows until capacity,
+		// then put rrs.rows to rowsToFlush and convert it to a part.
+		n := copy(rrs.rows[:cap(rrs.rows)], rows)
+		rows = rows[n:]
+		rowsToFlush = rrs.rows
+		n = getMaxRawRowsPerShard()
+		rrs.rows = make([]rawRow, 0, n)
+		if len(rows) <= n {
+			rrs.rows = append(rrs.rows[:0], rows...)
+		} else {
+			// The slowest path - rows do not fit rrs.rows capacity.
+			// So append them directly to rowsToFlush.
+			rowsToFlush = append(rowsToFlush, rows...)
+		}
 		atomic.StoreUint64(&rrs.lastFlushTime, fasttime.UnixTimestamp())
 	}
 	rrs.mu.Unlock()
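
To make the new control flow easier to follow, here is a minimal, self-contained sketch of the slow-path batching. It makes several simplifying assumptions: rawRow is reduced to an int, getMaxRawRowsPerShard() is replaced by a constant, the locking, lastFlushTime bookkeeping, and pt.flushRowsToParts call are omitted, and the batch to flush is returned instead of collected in an outer rowsToFlush variable. One deliberate deviation is flagged in the comments: the sketch fills the buffer starting at len(rrs.rows), which is what the new comment in the hunk describes ("Fill rrs.rows with rows until capacity"); the hunk itself copies into rrs.rows[:cap(rrs.rows)], i.e. starting at index 0, which would overwrite already-buffered rows unless surrounding code not visible in this hunk guarantees otherwise.

package main

import "fmt"

type rawRow = int // stand-in for the real rawRow struct

const maxRawRowsPerShard = 4 // stand-in for getMaxRawRowsPerShard()

type rawRowsShard struct {
	rows []rawRow
}

// addRows buffers rows and returns a batch to flush once the buffer is
// full. Slow-path batches always hold exactly cap(rrs.rows) rows, so the
// initial parts built from them have identical sizes.
func (rrs *rawRowsShard) addRows(rows []rawRow) []rawRow {
	if cap(rrs.rows) == 0 {
		rrs.rows = make([]rawRow, 0, maxRawRowsPerShard)
	}
	capacity := cap(rrs.rows) - len(rrs.rows)
	if capacity >= len(rows) {
		// Fast path - rows fit rrs.rows capacity.
		rrs.rows = append(rrs.rows, rows...)
		return nil
	}
	// Slow path - top rrs.rows up to full capacity and flush it as one
	// fixed-size batch. (Filling from len(rrs.rows), as done here, is
	// what the diff's comment describes; the hunk copies into
	// rrs.rows[:cap(rrs.rows)].)
	n := copy(rrs.rows[len(rrs.rows):cap(rrs.rows)], rows)
	rowsToFlush := rrs.rows[:cap(rrs.rows)]
	rows = rows[n:]
	rrs.rows = make([]rawRow, 0, maxRawRowsPerShard)
	if len(rows) <= maxRawRowsPerShard {
		// Leftover rows fit the fresh buffer.
		rrs.rows = append(rrs.rows, rows...)
	} else {
		// The slowest path - even the leftover rows do not fit one
		// shard, so flush them together with the full batch.
		rowsToFlush = append(rowsToFlush, rows...)
	}
	return rowsToFlush
}

func main() {
	var rrs rawRowsShard
	for i := 0; i < 10; i++ {
		batch := rrs.addRows([]rawRow{i, i + 10, i + 20})
		if batch != nil {
			fmt.Println("flush:", batch) // each flush here is exactly 4 rows
		}
	}
}

The effect is visible in the sketch's output: every slow-path flush hands back exactly cap(rrs.rows) rows, so the initial in-memory parts built from these batches have identical sizes, which (per the commit message) should let the background merger combine them at a steadier rate. The old code flushed len(rrs.rows)+len(rows) rows at once, a size that varied with ingestion batching.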