diff --git a/app/vminsert/concurrencylimiter/concurrencylimiter.go b/app/vminsert/concurrencylimiter/concurrencylimiter.go index d94deee9f..079bf03f4 100644 --- a/app/vminsert/concurrencylimiter/concurrencylimiter.go +++ b/app/vminsert/concurrencylimiter/concurrencylimiter.go @@ -13,8 +13,8 @@ import ( ) var ( - maxConcurrentInserts = flag.Int("maxConcurrentInserts", runtime.GOMAXPROCS(-1)*4, "The maximum number of concurrent inserts; see also `-insert.maxQueueDuration`") - maxQueueDuration = flag.Duration("insert.maxQueueDuration", time.Minute, "The maximum duration for waiting in the queue for insert requests due to `-maxConcurrentInserts`") + maxConcurrentInserts = flag.Int("maxConcurrentInserts", runtime.GOMAXPROCS(-1)*4, "The maximum number of concurrent inserts; see also -insert.maxQueueDuration") + maxQueueDuration = flag.Duration("insert.maxQueueDuration", time.Minute, "The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts") ) // ch is the channel for limiting concurrent calls to Do. 
diff --git a/app/vminsert/influx/request_handler.go b/app/vminsert/influx/request_handler.go index d27d20ad0..5cb3f6de8 100644 --- a/app/vminsert/influx/request_handler.go +++ b/app/vminsert/influx/request_handler.go @@ -21,8 +21,8 @@ import ( ) var ( - measurementFieldSeparator = flag.String("influxMeasurementFieldSeparator", "_", "Separator for `{measurement}{separator}{field_name}` metric name when inserted via Influx line protocol") - skipSingleField = flag.Bool("influxSkipSingleField", false, "Uses `{measurement}` instead of `{measurement}{separator}{field_name}` for metic name if Influx line contains only a single field") + measurementFieldSeparator = flag.String("influxMeasurementFieldSeparator", "_", "Separator for '{measurement}{separator}{field_name}' metric name when inserted via Influx line protocol") + skipSingleField = flag.Bool("influxSkipSingleField", false, "Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metric name if Influx line contains only a single field") ) var ( diff --git a/app/vminsert/vmimport/request_handler.go b/app/vminsert/vmimport/request_handler.go index ba8ed40dc..3ef1cd9b0 100644 --- a/app/vminsert/vmimport/request_handler.go +++ b/app/vminsert/vmimport/request_handler.go @@ -18,7 +18,7 @@ import ( "github.com/VictoriaMetrics/metrics" ) -var maxLineLen = flag.Int("import.maxLineLen", 100*1024*1024, "The maximum length in bytes of a single line accepted by `/api/v1/import`") +var maxLineLen = flag.Int("import.maxLineLen", 100*1024*1024, "The maximum length in bytes of a single line accepted by /api/v1/import") var ( rowsInserted = tenantmetrics.NewCounterMap(`vm_rows_inserted_total{type="vmimport"}`) diff --git a/app/vmrestore/main.go b/app/vmrestore/main.go index 865a4f0a4..9bd399e3f 100644 --- a/app/vmrestore/main.go +++ b/app/vmrestore/main.go @@ -18,7 +18,7 @@ var ( "VictoriaMetrics must be stopped when restoring from backup. -storageDataPath dir can be non-empty. 
In this case only missing data is downloaded from backup") concurrency = flag.Int("concurrency", 10, "The number of concurrent workers. Higher concurrency may reduce restore duration") maxBytesPerSecond = flag.Int("maxBytesPerSecond", 0, "The maximum download speed. There is no limit if it is set to 0") - skipBackupCompleteCheck = flag.Bool("skipBackupCompleteCheck", false, "Whether to skip checking for `backup complete` file in `-src`. This may be useful for restoring from old backups, which were created without `backup complete` file") + skipBackupCompleteCheck = flag.Bool("skipBackupCompleteCheck", false, "Whether to skip checking for 'backup complete' file in -src. This may be useful for restoring from old backups, which were created without 'backup complete' file") ) func main() { diff --git a/app/vmselect/main.go b/app/vmselect/main.go index 5c9f4f78d..36a35d72f 100644 --- a/app/vmselect/main.go +++ b/app/vmselect/main.go @@ -26,7 +26,7 @@ var ( httpListenAddr = flag.String("httpListenAddr", ":8481", "Address to listen for http connections") cacheDataPath = flag.String("cacheDataPath", "", "Path to directory for cache files. Cache isn't saved if empty") maxConcurrentRequests = flag.Int("search.maxConcurrentRequests", getDefaultMaxConcurrentRequests(), "The maximum number of concurrent search requests. "+ - "It shouldn't be high, since a single request can saturate all the CPU cores. See also `-search.maxQueueDuration`") + "It shouldn't be high, since a single request can saturate all the CPU cores. 
See also -search.maxQueueDuration") maxQueueDuration = flag.Duration("search.maxQueueDuration", 10*time.Second, "The maximum time the request waits for execution when -search.maxConcurrentRequests limit is reached") storageNodes = flagutil.NewArray("storageNode", "Addresses of vmstorage nodes; usage: -storageNode=vmstorage-host1:8401 -storageNode=vmstorage-host2:8401") ) diff --git a/app/vmselect/prometheus/prometheus.go b/app/vmselect/prometheus/prometheus.go index 1cbc3d9ad..e6f3b125a 100644 --- a/app/vmselect/prometheus/prometheus.go +++ b/app/vmselect/prometheus/prometheus.go @@ -26,11 +26,11 @@ import ( var ( latencyOffset = flag.Duration("search.latencyOffset", time.Second*30, "The time when data points become visible in query results after the colection. "+ "Too small value can result in incomplete last points for query results") - maxExportDuration = flag.Duration("search.maxExportDuration", time.Hour*24*30, "The maximum duration for `/api/v1/export` call") + maxExportDuration = flag.Duration("search.maxExportDuration", time.Hour*24*30, "The maximum duration for /api/v1/export call") maxQueryDuration = flag.Duration("search.maxQueryDuration", time.Second*30, "The maximum duration for search query execution") maxQueryLen = flag.Int("search.maxQueryLen", 16*1024, "The maximum search query length in bytes") - maxLookback = flag.Duration("search.maxLookback", 0, "Synonim to `-search.lookback-delta` from Prometheus. "+ "The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via `max_lookback` arg") + maxLookback = flag.Duration("search.maxLookback", 0, "Synonym to -search.lookback-delta from Prometheus. "+ "The value is dynamically detected from interval between time series datapoints if not set. 
It can be overridden on per-query basis via max_lookback arg") denyPartialResponse = flag.Bool("search.denyPartialResponse", false, "Whether to deny partial responses when some of vmstorage nodes are unavailable. This trades consistency over availability") selectNodes = flagutil.NewArray("selectNode", "Addresses of vmselect nodes; usage: -selectNode=vmselect-host1:8481 -selectNode=vmselect-host2:8481") ) diff --git a/lib/backup/actions/restore.go b/lib/backup/actions/restore.go index f681e876c..29905e364 100644 --- a/lib/backup/actions/restore.go +++ b/lib/backup/actions/restore.go @@ -62,7 +62,7 @@ func (r *Restore) Run() error { } if !ok { return fmt.Errorf("cannot find %s file in %s; this means either incomplete backup or old backup; "+ - "pass `-skipBackupCompleteCheck` command-line flag if you still need restoring from this backup", fscommon.BackupCompleteFilename, src) + "pass -skipBackupCompleteCheck command-line flag if you still need restoring from this backup", fscommon.BackupCompleteFilename, src) } } diff --git a/lib/fs/reader_at.go b/lib/fs/reader_at.go index 078d818d6..0acaedafb 100644 --- a/lib/fs/reader_at.go +++ b/lib/fs/reader_at.go @@ -10,7 +10,7 @@ import ( "golang.org/x/sys/unix" ) -var disableMmap = flag.Bool("fs.disableMmap", false, "Whether to use `pread` instead of `mmap` for reading data files") +var disableMmap = flag.Bool("fs.disableMmap", false, "Whether to use pread() instead of mmap() for reading data files") // MustReadAtCloser is rand-access read interface. type MustReadAtCloser interface { diff --git a/lib/storage/dedup.go b/lib/storage/dedup.go index 04e9929a6..aa78665db 100644 --- a/lib/storage/dedup.go +++ b/lib/storage/dedup.go @@ -8,7 +8,7 @@ import ( var minScrapeInterval = flag.Duration("dedup.minScrapeInterval", 0, "Remove superflouos samples from time series if they are located closer to each other than this duration. 
"+ "This may be useful for reducing overhead when multiple identically configured Prometheus instances write data to the same VictoriaMetrics. "+ - "Deduplication is disabled if the `-dedup.minScrapeInterval` is 0") + "Deduplication is disabled if the -dedup.minScrapeInterval is 0") func getMinDelta() int64 { // Divide minScrapeInterval by 2 in order to preserve proper data points.