app/vmagent,lib/persistentqueue: show a warning message if the --remoteWrite.maxDiskUsagePerURL flag is lower than 500MB (#4196)

* app/vmagent,lib/persistentqueue: show a warning message if the `--remoteWrite.maxDiskUsagePerURL` flag is lower than 500MB

* app/vmagent,lib/persistentqueue: linter fix

* app/vmagent,lib/persistentqueue: fix comment
Dmytro Kozlov 2023-04-26 13:23:01 +03:00 committed by GitHub
parent 368571be00
commit bc17f4828c
5 changed files with 13 additions and 9 deletions

@@ -1485,7 +1485,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
 -remoteWrite.maxDailySeries int
 	The maximum number of unique series vmagent can send to remote storage systems during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter
 -remoteWrite.maxDiskUsagePerURL array
-	The maximum file-based buffer size in bytes at -remoteWrite.tmpDataPath for each -remoteWrite.url. When buffer size reaches the configured maximum, then old data is dropped when adding new data to the buffer. Buffered data is stored in ~500MB chunks, so the minimum practical value for this flag is 500MB. Disk usage is unlimited if the value is set to 0
+	The maximum file-based buffer size in bytes at -remoteWrite.tmpDataPath for each -remoteWrite.url. When buffer size reaches the configured maximum, then old data is dropped when adding new data to the buffer. Buffered data is stored in ~500MB chunks. It is recommended to set the value for this flag to a multiple of the block size 500MB. Disk usage is unlimited if the value is set to 0
 	Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB.
 	Supports array of values separated by comma or specified via multiple flags.
 -remoteWrite.maxHourlySeries int
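
The updated description tells operators to size -remoteWrite.maxDiskUsagePerURL as a whole number of ~500MB chunk files rather than an arbitrary value. A minimal sketch of that sizing rule, assuming a hypothetical roundUpToChunkMultiple helper (not part of this commit) and mirroring the DefaultChunkFileSize constant introduced further down:

package main

import "fmt"

// DefaultChunkFileSize mirrors the constant exported from lib/persistentqueue
// in this commit: (32 MiB + 8) * 16, slightly more than 500MB per chunk file.
const DefaultChunkFileSize = (32*1024*1024 + 8) * 16

// roundUpToChunkMultiple is a hypothetical helper that rounds a requested
// per-URL disk budget up to a whole number of chunk files.
func roundUpToChunkMultiple(requestedBytes int64) int64 {
	if requestedBytes <= 0 {
		return 0 // 0 keeps the "unlimited disk usage" behavior
	}
	chunks := (requestedBytes + DefaultChunkFileSize - 1) / DefaultChunkFileSize
	return chunks * DefaultChunkFileSize
}

func main() {
	// Asking for 300MB is below one chunk file, so one full chunk is suggested.
	fmt.Println(roundUpToChunkMultiple(300 * 1024 * 1024))
}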

@@ -47,7 +47,7 @@ var (
 		"It is hidden by default, since it can contain sensitive info such as auth key")
 	maxPendingBytesPerURL = flagutil.NewArrayBytes("remoteWrite.maxDiskUsagePerURL", "The maximum file-based buffer size in bytes at -remoteWrite.tmpDataPath "+
 		"for each -remoteWrite.url. When buffer size reaches the configured maximum, then old data is dropped when adding new data to the buffer. "+
-		"Buffered data is stored in ~500MB chunks, so the minimum practical value for this flag is 500MB. "+
+		"Buffered data is stored in ~500MB chunks. It is recommended to set the value for this flag to a multiple of the block size 500MB. "+
 		"Disk usage is unlimited if the value is set to 0")
 	significantFigures = flagutil.NewArrayInt("remoteWrite.significantFigures", "The number of significant figures to leave in metric values before writing them "+
 		"to remote storage. See https://en.wikipedia.org/wiki/Significant_figures . Zero value saves all the significant figures. "+
@@ -523,6 +523,9 @@ func newRemoteWriteCtx(argIdx int, at *auth.Token, remoteWriteURL *url.URL, maxI
 	h := xxhash.Sum64([]byte(pqURL.String()))
 	queuePath := filepath.Join(*tmpDataPath, persistentQueueDirname, fmt.Sprintf("%d_%016X", argIdx+1, h))
 	maxPendingBytes := maxPendingBytesPerURL.GetOptionalArgOrDefault(argIdx, 0)
+	if maxPendingBytes != 0 && maxPendingBytes < persistentqueue.DefaultChunkFileSize {
+		logger.Warnf("specified flag `--remoteWrite.maxDiskUsagePerURL` is lower than buffered data chunks. It is recommended to set the value for this flag to a multiple of the block size 500MB.")
+	}
 	fq := persistentqueue.MustOpenFastQueue(queuePath, sanitizedURL, maxInmemoryBlocks, maxPendingBytes)
 	_ = metrics.GetOrCreateGauge(fmt.Sprintf(`vmagent_remotewrite_pending_data_bytes{path=%q, url=%q}`, queuePath, sanitizedURL), func() float64 {
 		return float64(fq.GetPendingBytes())
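
The guard added here only fires for a non-zero limit smaller than one chunk file, since a zero value means unlimited disk usage. A standalone sketch of the same condition, using the standard log package instead of the project's logger (the function name and message wording are illustrative, not the commit's code):

package main

import "log"

// DefaultChunkFileSize mirrors lib/persistentqueue.DefaultChunkFileSize.
const DefaultChunkFileSize = (32*1024*1024 + 8) * 16

// warnIfBelowChunkSize reproduces the shape of the check added to
// newRemoteWriteCtx: warn only for non-zero limits below one ~500MB chunk.
func warnIfBelowChunkSize(maxPendingBytes int64) {
	if maxPendingBytes != 0 && maxPendingBytes < DefaultChunkFileSize {
		log.Printf("-remoteWrite.maxDiskUsagePerURL=%d is lower than the ~500MB chunk size; consider a multiple of 500MB", maxPendingBytes)
	}
}

func main() {
	warnIfBelowChunkSize(100 * 1024 * 1024) // triggers the warning
	warnIfBelowChunkSize(0)                 // unlimited disk usage, no warning
}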

@@ -1489,7 +1489,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
 -remoteWrite.maxDailySeries int
 	The maximum number of unique series vmagent can send to remote storage systems during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter
 -remoteWrite.maxDiskUsagePerURL array
-	The maximum file-based buffer size in bytes at -remoteWrite.tmpDataPath for each -remoteWrite.url. When buffer size reaches the configured maximum, then old data is dropped when adding new data to the buffer. Buffered data is stored in ~500MB chunks, so the minimum practical value for this flag is 500MB. Disk usage is unlimited if the value is set to 0
+	The maximum file-based buffer size in bytes at -remoteWrite.tmpDataPath for each -remoteWrite.url. When buffer size reaches the configured maximum, then old data is dropped when adding new data to the buffer. Buffered data is stored in ~500MB chunks. It is recommended to set the value for this flag to a multiple of the block size 500MB. Disk usage is unlimited if the value is set to 0
 	Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB.
 	Supports array of values separated by comma or specified via multiple flags.
 -remoteWrite.maxHourlySeries int

@@ -22,7 +22,8 @@ import (
 // MaxBlockSize is the maximum size of the block persistent queue can work with.
 const MaxBlockSize = 32 * 1024 * 1024
 
-const defaultChunkFileSize = (MaxBlockSize + 8) * 16
+// DefaultChunkFileSize represents default chunk file size
+const DefaultChunkFileSize = (MaxBlockSize + 8) * 16
 
 var chunkFileNameRegex = regexp.MustCompile("^[0-9A-F]{16}$")
@@ -122,7 +123,7 @@ func mustOpen(path, name string, maxPendingBytes int64) *queue {
 	if maxPendingBytes < 0 {
 		maxPendingBytes = 0
 	}
-	return mustOpenInternal(path, name, defaultChunkFileSize, MaxBlockSize, uint64(maxPendingBytes))
+	return mustOpenInternal(path, name, DefaultChunkFileSize, MaxBlockSize, uint64(maxPendingBytes))
 }
 
 func mustOpenInternal(path, name string, chunkFileSize, maxBlockSize, maxPendingBytes uint64) *queue {
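
For reference, the exported constant works out to roughly 512 MiB, which is why the flag documentation speaks of "~500MB chunks". A small sketch of the arithmetic (reading the extra 8 bytes as a per-block header is an assumption, not stated in the diff):

package main

import "fmt"

func main() {
	// Same arithmetic as DefaultChunkFileSize = (MaxBlockSize + 8) * 16:
	// 16 blocks of 32 MiB each, plus 8 extra bytes per block
	// (presumably a per-block size header).
	const maxBlockSize = 32 * 1024 * 1024
	const defaultChunkFileSize = (maxBlockSize + 8) * 16
	fmt.Println(defaultChunkFileSize)                                   // 536871040 bytes
	fmt.Printf("%.1f MiB\n", float64(defaultChunkFileSize)/(1024*1024)) // ~512.0 MiB
}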

@@ -53,7 +53,7 @@ func TestQueueOpen(t *testing.T) {
 		path := "queue-open-too-new-chunk"
 		mustCreateDir(path)
 		mustCreateEmptyMetainfo(path, "foobar")
-		mustCreateFile(filepath.Join(path, fmt.Sprintf("%016X", 100*uint64(defaultChunkFileSize))), "asdf")
+		mustCreateFile(filepath.Join(path, fmt.Sprintf("%016X", 100*uint64(DefaultChunkFileSize))), "asdf")
 		q := mustOpen(path, "foobar", 0)
 		q.MustClose()
 		mustDeleteDir(path)
@@ -63,8 +63,8 @@ func TestQueueOpen(t *testing.T) {
 		mustCreateDir(path)
 		mi := &metainfo{
 			Name:         "foobar",
-			ReaderOffset: defaultChunkFileSize,
-			WriterOffset: defaultChunkFileSize,
+			ReaderOffset: DefaultChunkFileSize,
+			WriterOffset: DefaultChunkFileSize,
 		}
 		if err := mi.WriteToFile(filepath.Join(path, metainfoFilename)); err != nil {
 			t.Fatalf("unexpected error: %s", err)
@@ -79,7 +79,7 @@ func TestQueueOpen(t *testing.T) {
 		mustCreateDir(path)
 		mi := &metainfo{
 			Name:         "foobar",
-			ReaderOffset: defaultChunkFileSize + 123,
+			ReaderOffset: DefaultChunkFileSize + 123,
 		}
 		if err := mi.WriteToFile(filepath.Join(path, metainfoFilename)); err != nil {
 			t.Fatalf("unexpected error: %s", err)