lib/storage: stop exposing vm_merge_need_free_disk_space metric

This metric confuses users and does not provide any useful information.

See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/686#issuecomment-1733844128
Aliaksandr Valialkin 2023-09-25 16:52:37 +02:00
parent e453069dcd
commit 717c53af27
7 changed files with 16 additions and 65 deletions


@@ -1937,9 +1937,6 @@ and [cardinality explorer docs](#cardinality-explorer).
has at least 20% of free space. The remaining amount of free space
can be [monitored](#monitoring) via `vm_free_disk_space_bytes` metric. The total size of data
stored on the disk can be monitored via sum of `vm_data_size_bytes` metrics.
See also `vm_merge_need_free_disk_space` metrics, which are set to values higher than 0
if background merge cannot be initiated due to free disk space shortage. The value shows the number of per-month partitions,
which would start background merge if they had more free disk space.
* VictoriaMetrics buffers incoming data in memory for up to a few seconds before flushing it to persistent storage.
This may lead to the following "issues":
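
With the `vm_merge_need_free_disk_space` hint gone, the retained guidance above is to keep at least 20% of disk space free and to watch `vm_free_disk_space_bytes`. A minimal Go sketch of an equivalent local check (the Linux-only `syscall.Statfs` call, the data path and the hard-coded 20% threshold are illustrative assumptions, not how VictoriaMetrics itself computes the metric):

```go
package main

import (
	"fmt"
	"log"
	"syscall"
)

// freeSpaceFraction reports the fraction of free disk space on the filesystem
// containing path. Linux-only; a rough local stand-in for what
// vm_free_disk_space_bytes lets you monitor remotely.
func freeSpaceFraction(path string) (float64, error) {
	var st syscall.Statfs_t
	if err := syscall.Statfs(path, &st); err != nil {
		return 0, err
	}
	free := float64(st.Bavail) * float64(st.Bsize)
	total := float64(st.Blocks) * float64(st.Bsize)
	return free / total, nil
}

func main() {
	// Hypothetical -storageDataPath; adjust to your deployment.
	frac, err := freeSpaceFraction("/var/lib/victoria-metrics-data")
	if err != nil {
		log.Fatal(err)
	}
	if frac < 0.2 {
		fmt.Printf("warning: only %.1f%% disk space free; background merges may stall\n", frac*100)
	}
}
```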


@@ -585,11 +585,6 @@ func registerStorageMetrics(strg *storage.Storage) {
return float64(idbm().ItemsAddedSizeBytes)
})
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/686
metrics.NewGauge(`vm_merge_need_free_disk_space`, func() float64 {
return float64(tm().MergeNeedFreeDiskSpace)
})
metrics.NewGauge(`vm_pending_rows{type="storage"}`, func() float64 {
return float64(tm().PendingRows)
})
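
The removed gauge was registered through the github.com/VictoriaMetrics/metrics package in the same way as the surviving gauges around it. A minimal standalone sketch of that registration pattern (the metric name, callback value and listen address are illustrative, not VictoriaMetrics code):

```go
package main

import (
	"log"
	"net/http"

	"github.com/VictoriaMetrics/metrics"
)

func main() {
	// NewGauge registers a gauge whose value is produced by the callback
	// every time the metrics are exported.
	metrics.NewGauge(`example_pending_rows{type="storage"}`, func() float64 {
		return 42 // illustrative; the real callbacks read partition/indexdb metrics
	})

	// Export all registered metrics in Prometheus text exposition format.
	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		metrics.WritePrometheus(w, true)
	})
	log.Fatal(http.ListenAndServe(":8428", nil))
}
```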


@@ -50,6 +50,7 @@ The sandbox cluster installation is running under the constant load generated by
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): validate [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html) function names in alerting and recording rules when `vmalert` runs with `-dryRun` command-line flag. Previously it was allowed to use unknown (aka invalid) MetricsQL function names there. For example, `foo()` was counted as a valid query. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4933).
* FEATURE: limit the length of string params in log messages to 500 chars. Longer string params are replaced with `first_250_chars..last_250_chars`. This prevents overly long log lines from being emitted by VictoriaMetrics components.
* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): make sure that `q2` series are returned after `q1` series in the results of `q1 or q2` query, in the same way as Prometheus does. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4763).
* FEATURE: stop exposing the `vm_merge_need_free_disk_space` metric, since it turned out to confuse users without providing any useful information. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/686#issuecomment-1733844128).
* BUGFIX: [Official Grafana dashboards for VictoriaMetrics](https://grafana.com/orgs/victoriametrics): fix display of ingested rows rate for `Samples ingested/s` and `Samples rate` panels for vmagent's dashboard. Previously, not all ingested protocols were accounted for in these panels. An extra `Rows rate` panel was added to the `Ingestion` section to display the ingested rows rate split by protocol.
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix the bug causing render looping when switching to heatmap.


@@ -1940,9 +1940,6 @@ and [cardinality explorer docs](#cardinality-explorer).
has at least 20% of free space. The remaining amount of free space
can be [monitored](#monitoring) via `vm_free_disk_space_bytes` metric. The total size of data
stored on the disk can be monitored via sum of `vm_data_size_bytes` metrics.
See also `vm_merge_need_free_disk_space` metrics, which are set to values higher than 0
if background merge cannot be initiated due to free disk space shortage. The value shows the number of per-month partitions,
which would start background merge if they had more free disk space.
* VictoriaMetrics buffers incoming data in memory for up to a few seconds before flushing it to persistent storage.
This may lead to the following "issues":


@@ -1948,9 +1948,6 @@ and [cardinality explorer docs](#cardinality-explorer).
has at least 20% of free space. The remaining amount of free space
can be [monitored](#monitoring) via `vm_free_disk_space_bytes` metric. The total size of data
stored on the disk can be monitored via sum of `vm_data_size_bytes` metrics.
See also `vm_merge_need_free_disk_space` metrics, which are set to values higher than 0
if background merge cannot be initiated due to free disk space shortage. The value shows the number of per-month partitions,
which would start background merge if they had more free disk space.
* VictoriaMetrics buffers incoming data in memory for up to a few seconds before flushing it to persistent storage.
This may lead to the following "issues":


@@ -119,8 +119,6 @@ type partition struct {
inmemoryAssistedMerges uint64
smallAssistedMerges uint64
mergeNeedFreeDiskSpace uint64
mergeIdx uint64
smallPartsPath string
@@ -354,8 +352,6 @@ type partitionMetrics struct {
InmemoryAssistedMerges uint64
SmallAssistedMerges uint64
MergeNeedFreeDiskSpace uint64
}
// TotalRowsCount returns total number of rows in tm.
@@ -421,8 +417,6 @@ func (pt *partition) UpdateMetrics(m *partitionMetrics) {
m.InmemoryAssistedMerges += atomic.LoadUint64(&pt.inmemoryAssistedMerges)
m.SmallAssistedMerges += atomic.LoadUint64(&pt.smallAssistedMerges)
m.MergeNeedFreeDiskSpace += atomic.LoadUint64(&pt.mergeNeedFreeDiskSpace)
}
// AddRows adds the given rows to the partition pt.
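
For context, the removed `mergeNeedFreeDiskSpace` field followed the same pattern as the counters that remain: a per-partition `uint64` updated with atomic operations and summed into the `partitionMetrics` snapshot by `UpdateMetrics`. An illustrative, self-contained sketch of that pattern (the `fakePartition`/`fakeMetrics` names are made up, not the real lib/storage types):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// fakePartition stands in for the partition struct; only the counter pattern matters here.
type fakePartition struct {
	smallAssistedMerges uint64
}

// fakeMetrics stands in for partitionMetrics.
type fakeMetrics struct {
	SmallAssistedMerges uint64
}

// updateMetrics adds the atomically-maintained per-partition counter into the snapshot,
// mirroring the UpdateMetrics aggregation shown in the diff above.
func (pt *fakePartition) updateMetrics(m *fakeMetrics) {
	m.SmallAssistedMerges += atomic.LoadUint64(&pt.smallAssistedMerges)
}

func main() {
	pt := &fakePartition{}
	atomic.AddUint64(&pt.smallAssistedMerges, 3) // e.g. three assisted merges happened
	var m fakeMetrics
	pt.updateMetrics(&m)
	fmt.Println(m.SmallAssistedMerges) // 3
}
```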
@@ -1146,10 +1140,9 @@ func (pt *partition) mergeInmemoryParts() error {
maxOutBytes := pt.getMaxBigPartSize()
pt.partsLock.Lock()
pws, needFreeSpace := getPartsToMerge(pt.inmemoryParts, maxOutBytes, false)
pws := getPartsToMerge(pt.inmemoryParts, maxOutBytes, false)
pt.partsLock.Unlock()
atomicSetBool(&pt.mergeNeedFreeDiskSpace, needFreeSpace)
return pt.mergeParts(pws, pt.stopCh, false)
}
@@ -1166,10 +1159,9 @@ func (pt *partition) mergeExistingParts(isFinal bool) error {
dst = append(dst, pt.inmemoryParts...)
dst = append(dst, pt.smallParts...)
dst = append(dst, pt.bigParts...)
pws, needFreeSpace := getPartsToMerge(dst, maxOutBytes, isFinal)
pws := getPartsToMerge(dst, maxOutBytes, isFinal)
pt.partsLock.Unlock()
atomicSetBool(&pt.mergeNeedFreeDiskSpace, needFreeSpace)
return pt.mergeParts(pws, pt.stopCh, isFinal)
}
@@ -1629,8 +1621,7 @@ func (pt *partition) removeStaleParts() {
// getPartsToMerge returns optimal parts to merge from pws.
//
// The summary size of the returned parts must be smaller than maxOutBytes.
// The function returns true if pws contains parts, which cannot be merged because of maxOutBytes limit.
func getPartsToMerge(pws []*partWrapper, maxOutBytes uint64, isFinal bool) ([]*partWrapper, bool) {
func getPartsToMerge(pws []*partWrapper, maxOutBytes uint64, isFinal bool) []*partWrapper {
pwsRemaining := make([]*partWrapper, 0, len(pws))
for _, pw := range pws {
if !pw.isInMerge {
@@ -1639,14 +1630,13 @@ func getPartsToMerge(pws []*partWrapper, maxOutBytes uint64, isFinal bool) ([]*p
}
maxPartsToMerge := defaultPartsToMerge
var pms []*partWrapper
needFreeSpace := false
if isFinal {
for len(pms) == 0 && maxPartsToMerge >= finalPartsToMerge {
pms, needFreeSpace = appendPartsToMerge(pms[:0], pwsRemaining, maxPartsToMerge, maxOutBytes)
pms = appendPartsToMerge(pms[:0], pwsRemaining, maxPartsToMerge, maxOutBytes)
maxPartsToMerge--
}
} else {
pms, needFreeSpace = appendPartsToMerge(pms[:0], pwsRemaining, maxPartsToMerge, maxOutBytes)
pms = appendPartsToMerge(pms[:0], pwsRemaining, maxPartsToMerge, maxOutBytes)
}
for _, pw := range pms {
if pw.isInMerge {
@@ -1654,7 +1644,7 @@ func getPartsToMerge(pws []*partWrapper, maxOutBytes uint64, isFinal bool) ([]*p
}
pw.isInMerge = true
}
return pms, needFreeSpace
return pms
}
// minMergeMultiplier is the minimum multiplier for the size of the output part
@@ -1665,13 +1655,11 @@ func getPartsToMerge(pws []*partWrapper, maxOutBytes uint64, isFinal bool) ([]*p
// The 1.7 is good enough for production workloads.
const minMergeMultiplier = 1.7
// appendPartsToMerge finds optimal parts to merge from src, appends
// them to dst and returns the result.
// The function returns true if src contains parts, which cannot be merged because of maxOutBytes limit.
func appendPartsToMerge(dst, src []*partWrapper, maxPartsToMerge int, maxOutBytes uint64) ([]*partWrapper, bool) {
// appendPartsToMerge finds optimal parts to merge from src, appends them to dst and returns the result.
func appendPartsToMerge(dst, src []*partWrapper, maxPartsToMerge int, maxOutBytes uint64) []*partWrapper {
if len(src) < 2 {
// There is no need in merging zero or one part :)
return dst, false
return dst
}
if maxPartsToMerge < 2 {
logger.Panicf("BUG: maxPartsToMerge cannot be smaller than 2; got %d", maxPartsToMerge)
@@ -1679,18 +1667,15 @@ func appendPartsToMerge(dst, src []*partWrapper, maxPartsToMerge int, maxOutByte
// Filter out too big parts.
// This should reduce N for O(N^2) algorithm below.
skippedBigParts := 0
maxInPartBytes := uint64(float64(maxOutBytes) / minMergeMultiplier)
tmp := make([]*partWrapper, 0, len(src))
for _, pw := range src {
if pw.p.size > maxInPartBytes {
skippedBigParts++
continue
}
tmp = append(tmp, pw)
}
src = tmp
needFreeSpace := skippedBigParts > 1
sortPartsForOptimalMerge(src)
@@ -1709,15 +1694,12 @@ func appendPartsToMerge(dst, src []*partWrapper, maxPartsToMerge int, maxOutByte
for i := minSrcParts; i <= maxSrcParts; i++ {
for j := 0; j <= len(src)-i; j++ {
a := src[j : j+i]
outSize := getPartsSize(a)
if outSize > maxOutBytes {
needFreeSpace = true
}
if a[0].p.size*uint64(len(a)) < a[len(a)-1].p.size {
// Do not merge parts with too big difference in size,
// since this results in unbalanced merges.
continue
}
outSize := getPartsSize(a)
if outSize > maxOutBytes {
// There is no need in verifying remaining parts with bigger sizes.
break
@@ -1738,9 +1720,9 @@ func appendPartsToMerge(dst, src []*partWrapper, maxPartsToMerge int, maxOutByte
if maxM < minM {
// There is no sense in merging parts with too small m,
// since this leads to high disk write IO.
return dst, needFreeSpace
return dst
}
return append(dst, pws...), needFreeSpace
return append(dst, pws...)
}
func sortPartsForOptimalMerge(pws []*partWrapper) {
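
Even with the needFreeSpace bookkeeping removed, `appendPartsToMerge` keeps its selection strategy: drop parts that are too big relative to `maxOutBytes`, sort the rest, then scan contiguous windows of parts and pick the window with the largest merge multiplier (total size divided by the biggest part in the window) that still fits into `maxOutBytes`. A self-contained sketch of that idea with illustrative names; it ignores the `isInMerge` bookkeeping, the `minMergeMultiplier` filtering and the exact ordering used by the real code:

```go
package main

import (
	"fmt"
	"sort"
)

// pickPartsToMerge returns the contiguous window of part sizes (after sorting)
// whose total stays under maxOutBytes and whose merge multiplier
// m = totalSize / largestPartSize is the largest.
func pickPartsToMerge(sizes []uint64, maxParts int, maxOutBytes uint64) []uint64 {
	sort.Slice(sizes, func(i, j int) bool { return sizes[i] < sizes[j] })
	var best []uint64
	bestM := 0.0
	for n := 2; n <= maxParts && n <= len(sizes); n++ {
		for j := 0; j+n <= len(sizes); j++ {
			window := sizes[j : j+n]
			var sum uint64
			for _, s := range window {
				sum += s
			}
			if sum > maxOutBytes {
				break // windows further right only contain bigger parts
			}
			if m := float64(sum) / float64(window[n-1]); m > bestM {
				bestM = m
				best = window
			}
		}
	}
	return best
}

func main() {
	fmt.Println(pickPartsToMerge([]uint64{4, 2, 64, 1, 3}, 4, 32)) // [1 2 3 4]
}
```

The early break mirrors the "no need in verifying remaining parts with bigger sizes" shortcut in the diff: once a window overflows maxOutBytes, every window further to the right can only be larger.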


@@ -34,24 +34,6 @@ func TestAppendPartsToMerge(t *testing.T) {
testAppendPartsToMerge(t, 3, []uint64{11, 1, 10, 100, 10}, []uint64{10, 10, 11})
}
func TestAppendPartsToMergeNeedFreeSpace(t *testing.T) {
f := func(sizes []uint64, maxOutBytes int, expectedNeedFreeSpace bool) {
t.Helper()
pws := newTestPartWrappersForSizes(sizes)
_, needFreeSpace := appendPartsToMerge(nil, pws, defaultPartsToMerge, uint64(maxOutBytes))
if needFreeSpace != expectedNeedFreeSpace {
t.Fatalf("unexpected needFreeSpace; got %v; want %v", needFreeSpace, expectedNeedFreeSpace)
}
}
f(nil, 1000, false)
f([]uint64{1000}, 100, false)
f([]uint64{1000}, 1100, false)
f([]uint64{120, 200}, 180, true)
f([]uint64{100, 200}, 310, false)
f([]uint64{100, 110, 109, 1}, 300, true)
f([]uint64{100, 110, 109, 1}, 330, false)
}
func TestAppendPartsToMergeManyParts(t *testing.T) {
// Verify that big number of parts are merged into minimal number of parts
// using minimum merges.
@@ -69,7 +51,7 @@ func TestAppendPartsToMergeManyParts(t *testing.T) {
iterationsCount := 0
sizeMergedTotal := uint64(0)
for {
pms, _ := appendPartsToMerge(nil, pws, defaultPartsToMerge, maxOutSize)
pms := appendPartsToMerge(nil, pws, defaultPartsToMerge, maxOutSize)
if len(pms) == 0 {
break
}
@@ -118,7 +100,7 @@ func testAppendPartsToMerge(t *testing.T, maxPartsToMerge int, initialSizes, exp
pws := newTestPartWrappersForSizes(initialSizes)
// Verify appending to nil.
pms, _ := appendPartsToMerge(nil, pws, maxPartsToMerge, 1e9)
pms := appendPartsToMerge(nil, pws, maxPartsToMerge, 1e9)
sizes := newTestSizesFromPartWrappers(pms)
if !reflect.DeepEqual(sizes, expectedSizes) {
t.Fatalf("unexpected size for maxPartsToMerge=%d, initialSizes=%d; got\n%d; want\n%d",
@@ -135,7 +117,7 @@ func testAppendPartsToMerge(t *testing.T, maxPartsToMerge int, initialSizes, exp
{},
{},
}
pms, _ = appendPartsToMerge(prefix, pws, maxPartsToMerge, 1e9)
pms = appendPartsToMerge(prefix, pws, maxPartsToMerge, 1e9)
if !reflect.DeepEqual(pms[:len(prefix)], prefix) {
t.Fatalf("unexpected prefix for maxPartsToMerge=%d, initialSizes=%d; got\n%+v; want\n%+v",
maxPartsToMerge, initialSizes, pms[:len(prefix)], prefix)