all: do not clash flag description with back-quoted flag types
See https://golang.org/pkg/flag/#PrintDefaults for more details.
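As the flag package documentation linked above explains, PrintDefaults takes the first back-quoted word in a usage string as the flag's value name and strips the quotes, so back-quoting another flag name replaces the displayed type in -h output. A minimal sketch of the clash, using a simplified default (16 stands in for runtime.GOMAXPROCS(-1)*4) and a made-up "Fixed" flag name so both variants can be registered side by side:

```go
package main

import "flag"

func main() {
	// With the back-quoted `-insert.maxQueueDuration`, PrintDefaults shows
	//   -maxConcurrentInserts -insert.maxQueueDuration
	// as if that were the value name, instead of the real type:
	//   -maxConcurrentInserts int
	flag.Int("maxConcurrentInserts", 16,
		"The maximum number of concurrent inserts; see also `-insert.maxQueueDuration`")

	// Without back quotes (the form this commit switches to), the real type is shown.
	// The "Fixed" suffix exists only for this illustration.
	flag.Int("maxConcurrentInsertsFixed", 16,
		"The maximum number of concurrent inserts; see also -insert.maxQueueDuration")

	flag.PrintDefaults()
}
```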
parent 2ec248453b
commit 7cde594696

9 changed files with 13 additions and 13 deletions
@@ -13,8 +13,8 @@ import (
 )
 
 var (
-	maxConcurrentInserts = flag.Int("maxConcurrentInserts", runtime.GOMAXPROCS(-1)*4, "The maximum number of concurrent inserts; see also `-insert.maxQueueDuration`")
-	maxQueueDuration = flag.Duration("insert.maxQueueDuration", time.Minute, "The maximum duration for waiting in the queue for insert requests due to `-maxConcurrentInserts`")
+	maxConcurrentInserts = flag.Int("maxConcurrentInserts", runtime.GOMAXPROCS(-1)*4, "The maximum number of concurrent inserts; see also -insert.maxQueueDuration")
+	maxQueueDuration = flag.Duration("insert.maxQueueDuration", time.Minute, "The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts")
 )
 
 // ch is the channel for limiting concurrent calls to Do.
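The hunk above is representative of the rest of the diff: with the back quotes removed, PrintDefaults falls back to the flag's real value type (int, duration, string, and so on). For contrast, a short sketch of what back quotes in usage strings are meant for, naming the value placeholder rather than another flag; the listenAddr flag is hypothetical and not part of this change:

```go
package main

import "flag"

func main() {
	// Documented convention: back-quote a name for the value itself.
	// -h then prints "-listenAddr addr" and strips the quotes from the text.
	flag.String("listenAddr", ":8080", "TCP `addr` to listen on")
	flag.PrintDefaults()
}
```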
@@ -21,8 +21,8 @@ import (
 )
 
 var (
-	measurementFieldSeparator = flag.String("influxMeasurementFieldSeparator", "_", "Separator for `{measurement}{separator}{field_name}` metric name when inserted via Influx line protocol")
-	skipSingleField = flag.Bool("influxSkipSingleField", false, "Uses `{measurement}` instead of `{measurement}{separator}{field_name}` for metic name if Influx line contains only a single field")
+	measurementFieldSeparator = flag.String("influxMeasurementFieldSeparator", "_", "Separator for '{measurement}{separator}{field_name}' metric name when inserted via Influx line protocol")
+	skipSingleField = flag.Bool("influxSkipSingleField", false, "Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metic name if Influx line contains only a single field")
 )
 
 var (
@@ -18,7 +18,7 @@ import (
 	"github.com/VictoriaMetrics/metrics"
 )
 
-var maxLineLen = flag.Int("import.maxLineLen", 100*1024*1024, "The maximum length in bytes of a single line accepted by `/api/v1/import`")
+var maxLineLen = flag.Int("import.maxLineLen", 100*1024*1024, "The maximum length in bytes of a single line accepted by /api/v1/import")
 
 var (
 	rowsInserted = tenantmetrics.NewCounterMap(`vm_rows_inserted_total{type="vmimport"}`)
@@ -18,7 +18,7 @@ var (
 		"VictoriaMetrics must be stopped when restoring from backup. -storageDataPath dir can be non-empty. In this case only missing data is downloaded from backup")
 	concurrency = flag.Int("concurrency", 10, "The number of concurrent workers. Higher concurrency may reduce restore duration")
 	maxBytesPerSecond = flag.Int("maxBytesPerSecond", 0, "The maximum download speed. There is no limit if it is set to 0")
-	skipBackupCompleteCheck = flag.Bool("skipBackupCompleteCheck", false, "Whether to skip checking for `backup complete` file in `-src`. This may be useful for restoring from old backups, which were created without `backup complete` file")
+	skipBackupCompleteCheck = flag.Bool("skipBackupCompleteCheck", false, "Whether to skip checking for 'backup complete' file in -src. This may be useful for restoring from old backups, which were created without 'backup complete' file")
 )
 
 func main() {
@@ -26,7 +26,7 @@ var (
 	httpListenAddr = flag.String("httpListenAddr", ":8481", "Address to listen for http connections")
 	cacheDataPath = flag.String("cacheDataPath", "", "Path to directory for cache files. Cache isn't saved if empty")
 	maxConcurrentRequests = flag.Int("search.maxConcurrentRequests", getDefaultMaxConcurrentRequests(), "The maximum number of concurrent search requests. "+
-		"It shouldn't be high, since a single request can saturate all the CPU cores. See also `-search.maxQueueDuration`")
+		"It shouldn't be high, since a single request can saturate all the CPU cores. See also -search.maxQueueDuration")
 	maxQueueDuration = flag.Duration("search.maxQueueDuration", 10*time.Second, "The maximum time the request waits for execution when -search.maxConcurrentRequests limit is reached")
 	storageNodes = flagutil.NewArray("storageNode", "Addresses of vmstorage nodes; usage: -storageNode=vmstorage-host1:8401 -storageNode=vmstorage-host2:8401")
 )
@@ -26,11 +26,11 @@ import (
 var (
 	latencyOffset = flag.Duration("search.latencyOffset", time.Second*30, "The time when data points become visible in query results after the colection. "+
 		"Too small value can result in incomplete last points for query results")
-	maxExportDuration = flag.Duration("search.maxExportDuration", time.Hour*24*30, "The maximum duration for `/api/v1/export` call")
+	maxExportDuration = flag.Duration("search.maxExportDuration", time.Hour*24*30, "The maximum duration for /api/v1/export call")
 	maxQueryDuration = flag.Duration("search.maxQueryDuration", time.Second*30, "The maximum duration for search query execution")
 	maxQueryLen = flag.Int("search.maxQueryLen", 16*1024, "The maximum search query length in bytes")
-	maxLookback = flag.Duration("search.maxLookback", 0, "Synonim to `-search.lookback-delta` from Prometheus. "+
-		"The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via `max_lookback` arg")
+	maxLookback = flag.Duration("search.maxLookback", 0, "Synonim to -search.lookback-delta from Prometheus. "+
+		"The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg")
 	denyPartialResponse = flag.Bool("search.denyPartialResponse", false, "Whether to deny partial responses when some of vmstorage nodes are unavailable. This trades consistency over availability")
 	selectNodes = flagutil.NewArray("selectNode", "Addresses of vmselect nodes; usage: -selectNode=vmselect-host1:8481 -selectNode=vmselect-host2:8481")
 )
@@ -62,7 +62,7 @@ func (r *Restore) Run() error {
 		}
 		if !ok {
 			return fmt.Errorf("cannot find %s file in %s; this means either incomplete backup or old backup; "+
-				"pass `-skipBackupCompleteCheck` command-line flag if you still need restoring from this backup", fscommon.BackupCompleteFilename, src)
+				"pass -skipBackupCompleteCheck command-line flag if you still need restoring from this backup", fscommon.BackupCompleteFilename, src)
 		}
 	}
 
@@ -10,7 +10,7 @@ import (
 	"golang.org/x/sys/unix"
 )
 
-var disableMmap = flag.Bool("fs.disableMmap", false, "Whether to use `pread` instead of `mmap` for reading data files")
+var disableMmap = flag.Bool("fs.disableMmap", false, "Whether to use pread() instead of mmap() for reading data files")
 
 // MustReadAtCloser is rand-access read interface.
 type MustReadAtCloser interface {
@@ -8,7 +8,7 @@ import (
 
 var minScrapeInterval = flag.Duration("dedup.minScrapeInterval", 0, "Remove superflouos samples from time series if they are located closer to each other than this duration. "+
 	"This may be useful for reducing overhead when multiple identically configured Prometheus instances write data to the same VictoriaMetrics. "+
-	"Deduplication is disabled if the `-dedup.minScrapeInterval` is 0")
+	"Deduplication is disabled if the -dedup.minScrapeInterval is 0")
 
 func getMinDelta() int64 {
 	// Divide minScrapeInterval by 2 in order to preserve proper data points.
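Beyond this commit, a small self-check can guard against back quotes creeping back into flag descriptions, since PrintDefaults keys off the first back-quoted word (the same rule flag.UnquoteUsage implements). A hedged sketch, assuming it runs after all flags have been registered:

```go
package main

import (
	"flag"
	"fmt"
	"strings"
)

func main() {
	// Walk every registered flag and report usage strings that still contain
	// a back quote; the first back-quoted word would be rendered as the value
	// name in -h output instead of the flag's real type.
	flag.VisitAll(func(f *flag.Flag) {
		if strings.Contains(f.Usage, "`") {
			fmt.Printf("flag -%s back-quotes part of its description: %s\n", f.Name, f.Usage)
		}
	})
}
```

Dropped into a test after a package's flag definitions, a check like this would catch descriptions of the kind removed here.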