Merge branch 'master' into master

Commit adac05e978 by Victor Amorim dos Santos, 2024-02-08 13:36:27 -03:00, committed by GitHub.
GPG key ID: B5690EEEBB952194 (no known key found for this signature in the database).
53 changed files with 584 additions and 149 deletions


@@ -178,7 +178,8 @@ victoria-metrics-crossbuild: \
victoria-metrics-darwin-amd64 \
victoria-metrics-darwin-arm64 \
victoria-metrics-freebsd-amd64 \
victoria-metrics-openbsd-amd64
victoria-metrics-openbsd-amd64 \
victoria-metrics-windows-amd64
vmutils-crossbuild: \
vmutils-linux-386 \


@@ -1538,7 +1538,7 @@ Set HTTP request header `Content-Encoding: gzip` when sending gzip-compressed da
VictoriaMetrics accepts data in JSON line format at [/api/v1/import](#how-to-import-data-in-json-line-format)
and exports data in this format at [/api/v1/export](#how-to-export-data-in-json-line-format).
The format follows [JSON streaming concept](http://ndjson.org/), i.e. each line contains a JSON object with metrics data in the following format:
The format follows [JSON streaming concept](https://jsonlines.org/), i.e. each line contains a JSON object with metrics data in the following format:
```json
{
@@ -2310,6 +2310,21 @@ Sometimes it is needed to remove such caches on the next startup. This can be do
In this case VictoriaMetrics will automatically remove all the caches on the next start.
See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1447) for details.
It is also possible to remove the [rollup result cache](#rollup-result-cache) on startup by passing the `-search.resetRollupResultCacheOnStartup` command-line flag to VictoriaMetrics.
## Rollup result cache
VictoriaMetrics caches query responses by default. This improves performance for repeated queries
to [`/api/v1/query`](https://docs.victoriametrics.com/keyconcepts/#instant-query) and [`/api/v1/query_range`](https://docs.victoriametrics.com/keyconcepts/#range-query)
with increasing `time`, `start` and `end` query args.
This cache may work incorrectly when ingesting historical data into VictoriaMetrics. See [these docs](#backfilling) for details.
The rollup cache can be disabled either globally by running VictoriaMetrics with the `-search.disableCache` command-line flag
or on a per-query basis by passing `nocache=1` query arg to `/api/v1/query` and `/api/v1/query_range`.
See also [cache removal docs](#cache-removal).
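For example, a one-off cache bypass for a single range query might look like this (the address and the query are illustrative; `nocache=1` is the cache-bypass arg described above):

```
# Serve this query without using the rollup result cache
curl 'http://localhost:8428/api/v1/query_range?query=up&start=2024-02-01T00:00:00Z&end=2024-02-01T01:00:00Z&step=15s&nocache=1'
```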
## Cache tuning
VictoriaMetrics uses various in-memory caches for faster data ingestion and query performance.
@@ -2374,11 +2389,12 @@ VictoriaMetrics accepts historical data in arbitrary order of time via [any supp
See [how to backfill data with recording rules in vmalert](https://docs.victoriametrics.com/vmalert.html#rules-backfilling).
Make sure that configured `-retentionPeriod` covers timestamps for the backfilled data.
It is recommended to disable the query cache with the `-search.disableCache` command-line flag when writing
It is recommended to disable the [query cache](#rollup-result-cache) with the `-search.disableCache` command-line flag when writing
historical data with timestamps from the past, since the cache assumes that the data is written with
the current timestamps. The query cache can be re-enabled after the backfilling is complete.
An alternative solution is to query [/internal/resetRollupResultCache](https://docs.victoriametrics.com/url-examples.html#internalresetrollupresultcache) handler after the backfilling is complete. This will reset the query cache, which could contain incomplete data cached during the backfilling.
An alternative solution is to query [/internal/resetRollupResultCache](https://docs.victoriametrics.com/url-examples.html#internalresetrollupresultcache)
after the backfilling is complete. This will reset the [query cache](#rollup-result-cache), which could contain incomplete data cached during the backfilling.
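A minimal sketch of such a reset call (the address is illustrative; pass the `authKey` query arg if `-search.resetCacheAuthKey` is configured):

```
# Drop the rollup result cache so queries don't serve incomplete backfilled data
curl 'http://localhost:8428/internal/resetRollupResultCache'
```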
Yet another solution is to increase `-search.cacheTimestampOffset` flag value in order to disable caching
for data with timestamps close to the current time. Single-node VictoriaMetrics automatically resets response
@@ -2722,6 +2738,10 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-metricsAuthKey value
Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
Flag value can be read from the given file when using -metricsAuthKey=file:///abs/path/to/file or -metricsAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -metricsAuthKey=http://host/path or -metricsAuthKey=https://host/path
-mtls
Whether to require valid client certificate for https requests to -httpListenAddr . This flag works only if -tls flag is set. See also -mtlsCAFile . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/enterprise.html
-mtlsCAFile string
Optional path to TLS Root CA for verifying client certificates when -mtls is enabled. By default the host system TLS Root CA is used for client certificate verification. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/enterprise.html
-newrelic.maxInsertRequestSize size
The maximum size in bytes of a single NewRelic request to /newrelic/infra/v2/metrics/events/bulk
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 67108864)
@@ -2879,7 +2899,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-search.disableAutoCacheReset
Whether to disable automatic response cache reset if a sample with timestamp outside -search.cacheTimestampOffset is inserted into VictoriaMetrics
-search.disableCache
Whether to disable response caching. This may be useful during data backfilling
Whether to disable response caching. This may be useful when ingesting historical data. See https://docs.victoriametrics.com/#backfilling . See also -search.resetRollupResultCacheOnStartup
-search.graphiteMaxPointsPerSeries int
The maximum number of points per series Graphite render API can return (default 1000000)
-search.graphiteStorageStep duration
@@ -2963,6 +2983,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-search.resetCacheAuthKey value
Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call
Flag value can be read from the given file when using -search.resetCacheAuthKey=file:///abs/path/to/file or -search.resetCacheAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -search.resetCacheAuthKey=http://host/path or -search.resetCacheAuthKey=https://host/path
-search.resetRollupResultCacheOnStartup
Whether to reset rollup result cache on startup. See https://docs.victoriametrics.com/#rollup-result-cache . See also -search.disableCache
-search.setLookbackToStep
Whether to fix lookback interval to 'step' query arg value. If set to true, the query model becomes closer to InfluxDB data model. If set to true, then -search.maxLookback and -search.maxStalenessInterval are ignored
-search.treatDotsAsIsInRegexps
@@ -3015,7 +3037,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-streamAggr.keepInput
Whether to keep all the input samples after the aggregation with -streamAggr.config. By default, only aggregated samples are dropped, while the remaining samples are stored in the database. See also -streamAggr.dropInput and https://docs.victoriametrics.com/stream-aggregation.html
-tls
Whether to enable TLS for incoming HTTP requests at -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set
Whether to enable TLS for incoming HTTP requests at -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set. See also -mtls
-tlsCertFile string
Path to file with TLS certificate if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower. The provided certificate file is automatically re-read every second, so it can be dynamically updated
-tlsCipherSuites array


@@ -124,6 +124,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
{"api/v1/status/tsdb", "tsdb status page"},
{"api/v1/status/top_queries", "top queries"},
{"api/v1/status/active_queries", "active queries"},
{"-/reload", "reload configuration"},
})
return true
}


@@ -8,6 +8,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/appmetrics"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
@@ -49,16 +50,8 @@ func selfScraper(scrapeInterval time.Duration) {
var mrs []storage.MetricRow
var labels []prompb.Label
t := time.NewTicker(scrapeInterval)
var currentTimestamp int64
for {
select {
case <-selfScraperStopCh:
t.Stop()
logger.Infof("stopped self-scraping `/metrics` page")
return
case currentTime := <-t.C:
currentTimestamp = currentTime.UnixNano() / 1e6
}
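// f scrapes the local `/metrics` page, converts the result into storage rows and saves them.
// When sendStaleMarkers is true (i.e. on shutdown), sample values are replaced with staleness markers.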
f := func(currentTime time.Time, sendStaleMarkers bool) {
currentTimestamp := currentTime.UnixNano() / 1e6
bb.Reset()
appmetrics.WritePrometheusMetrics(&bb)
s := bytesutil.ToUnsafeString(bb.B)
@@ -83,12 +76,27 @@
mr := &mrs[len(mrs)-1]
mr.MetricNameRaw = storage.MarshalMetricNameRaw(mr.MetricNameRaw[:0], labels)
mr.Timestamp = currentTimestamp
mr.Value = r.Value
if sendStaleMarkers {
mr.Value = decimal.StaleNaN
} else {
mr.Value = r.Value
}
}
if err := vmstorage.AddRows(mrs); err != nil {
logger.Errorf("cannot store self-scraped metrics: %s", err)
}
}
for {
select {
case <-selfScraperStopCh:
f(time.Now(), true)
t.Stop()
logger.Infof("stopped self-scraping `/metrics` page")
return
case currentTime := <-t.C:
f(currentTime, false)
}
}
}
func addLabel(dst []prompb.Label, key, value string) []prompb.Label {


@@ -10,6 +10,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)
@@ -93,7 +94,7 @@ func Init(extraParams url.Values) (QuerierBuilder, error) {
logger.Warnf("flag `-datasource.lookback` will be deprecated soon. Please use `-rule.evalDelay` command-line flag instead. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5155 for details.")
}
tr, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
tr, err := httputils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
if err != nil {
return nil, fmt.Errorf("failed to create transport: %w", err)
}


@@ -11,6 +11,7 @@ import (
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
)
@@ -127,7 +128,7 @@ func NewAlertManager(alertManagerURL string, fn AlertURLGenerator, authCfg proma
if authCfg.TLSConfig != nil {
tls = authCfg.TLSConfig
}
tr, err := utils.Transport(alertManagerURL, tls.CertFile, tls.KeyFile, tls.CAFile, tls.ServerName, tls.InsecureSkipVerify)
tr, err := httputils.Transport(alertManagerURL, tls.CertFile, tls.KeyFile, tls.CAFile, tls.ServerName, tls.InsecureSkipVerify)
if err != nil {
return nil, fmt.Errorf("failed to create transport: %w", err)
}


@@ -8,6 +8,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
)
var (
@@ -60,7 +61,7 @@ func Init() (datasource.QuerierBuilder, error) {
if *addr == "" {
return nil, nil
}
tr, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
tr, err := httputils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
if err != nil {
return nil, fmt.Errorf("failed to create transport: %w", err)
}


@@ -11,7 +11,7 @@ import (
"github.com/golang/snappy"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)
@@ -30,7 +30,7 @@ func NewDebugClient() (*DebugClient, error) {
return nil, nil
}
t, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
t, err := httputils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
if err != nil {
return nil, fmt.Errorf("failed to create transport: %w", err)
}


@@ -8,6 +8,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
)
var (
@@ -64,7 +65,7 @@ func Init(ctx context.Context) (*Client, error) {
return nil, nil
}
t, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
t, err := httputils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
if err != nil {
return nil, fmt.Errorf("failed to create transport: %w", err)
}


@@ -351,7 +351,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
}
promscrapeConfigReloadRequests.Inc()
procutil.SelfSIGHUP()
w.WriteHeader(http.StatusNoContent)
w.WriteHeader(http.StatusOK)
return true
case "/ready":
if rdy := atomic.LoadInt32(&promscrape.PendingScrapeConfigs); rdy > 0 {
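A quick way to verify the new behavior (the address and method are illustrative; single-node VictoriaMetrics previously answered 204 here):

```
# Trigger a config reload and print the HTTP status code; expect 200
curl -s -o /dev/null -w '%{http_code}\n' 'http://localhost:8428/-/reload'
```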


@@ -29,7 +29,8 @@ import (
)
var (
disableCache = flag.Bool("search.disableCache", false, "Whether to disable response caching. This may be useful during data backfilling")
disableCache = flag.Bool("search.disableCache", false, "Whether to disable response caching. This may be useful when ingesting historical data. "+
"See https://docs.victoriametrics.com/#backfilling . See also -search.resetRollupResultCacheOnStartup")
maxPointsSubqueryPerTimeseries = flag.Int("search.maxPointsSubqueryPerTimeseries", 100e3, "The maximum number of points per series, which can be generated by subquery. "+
"See https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3")
maxMemoryPerQuery = flagutil.NewBytes("search.maxMemoryPerQuery", 0, "The maximum amounts of memory a single query may consume. "+


@@ -1916,6 +1916,10 @@ func rollupChangesPrometheus(rfa *rollupFuncArg) float64 {
n := 0
for _, v := range values[1:] {
if v != prevValue {
if math.Abs(v-prevValue) < 1e-12*math.Abs(v) {
// This may be a precision error. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/767#issuecomment-1650932203
continue
}
n++
prevValue = v
}
@@ -1939,6 +1943,10 @@ func rollupChanges(rfa *rollupFuncArg) float64 {
}
for _, v := range values {
if v != prevValue {
if math.Abs(v-prevValue) < 1e-12*math.Abs(v) {
// This may be a precision error. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/767#issuecomment-1650932203
continue
}
n++
prevValue = v
}
@@ -1967,6 +1975,10 @@ func rollupIncreases(rfa *rollupFuncArg) float64 {
n := 0
for _, v := range values {
if v > prevValue {
if math.Abs(v-prevValue) < 1e-12*math.Abs(v) {
// This may be a precision error. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/767#issuecomment-1650932203
continue
}
n++
}
prevValue = v
@@ -1998,6 +2010,10 @@ func rollupResets(rfa *rollupFuncArg) float64 {
n := 0
for _, v := range values {
if v < prevValue {
if math.Abs(v-prevValue) < 1e-12*math.Abs(v) {
// This may be a precision error. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/767#issuecomment-1650932203
continue
}
n++
}
prevValue = v


@@ -30,6 +30,8 @@ var (
"due to time synchronization issues between VictoriaMetrics and data sources. See also -search.disableAutoCacheReset")
disableAutoCacheReset = flag.Bool("search.disableAutoCacheReset", false, "Whether to disable automatic response cache reset if a sample with timestamp "+
"outside -search.cacheTimestampOffset is inserted into VictoriaMetrics")
resetRollupResultCacheOnStartup = flag.Bool("search.resetRollupResultCacheOnStartup", false, "Whether to reset rollup result cache on startup. "+
"See https://docs.victoriametrics.com/#rollup-result-cache . See also -search.disableCache")
)
// ResetRollupResultCacheIfNeeded resets rollup result cache if mrs contains timestamps outside `now - search.cacheTimestampOffset`.
@@ -117,7 +119,12 @@ func InitRollupResultCache(cachePath string) {
cacheSize := getRollupResultCacheSize()
var c *workingsetcache.Cache
if len(rollupResultCachePath) > 0 {
logger.Infof("loading rollupResult cache from %q...", rollupResultCachePath)
if *resetRollupResultCacheOnStartup {
logger.Infof("removing rollupResult cache at %q becasue -search.resetRollupResultCacheOnStartup command-line flag is set", rollupResultCachePath)
fs.MustRemoveAll(rollupResultCachePath)
} else {
logger.Infof("loading rollupResult cache from %q...", rollupResultCachePath)
}
c = workingsetcache.Load(rollupResultCachePath, cacheSize)
mustLoadRollupResultCacheKeyPrefix(rollupResultCachePath)
} else {
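A usage sketch for the flag handled above (the binary name and the storage path are illustrative):

```
# Remove the on-disk rollupResult cache once at startup instead of loading it
./victoria-metrics-prod -storageDataPath=/var/lib/victoriametrics -search.resetRollupResultCacheOnStartup
```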


@@ -4857,7 +4857,7 @@
"type": "prometheus",
"uid": "$ds"
},
"description": "Shows the approx time needed to reach 100% of disk capacity for at least one vmstorage node based on the following params:\n* free disk space;\n* row ingestion rate;\n* dedup rate;\n* compression.\n\nUse this panel for capacity planning in order to estimate the time remaining for running out of the disk space.",
"description": "Shows the approx time needed to reach 100% of disk capacity for at least one vmstorage node based on the following params:\n* free disk space;\n* row ingestion rate;\n* compression.\n\nNote: this panel doesn't account for deduplication process.",
"fieldConfig": {
"defaults": {
"color": {
@@ -4952,7 +4952,7 @@
"uid": "$ds"
},
"editorMode": "code",
"expr": "min(vm_free_disk_space_bytes{job=~\"$job_storage\", instance=~\"$instance\"} \n/ \nignoring(path) (\n (\n rate(vm_rows_added_to_storage_total{job=~\"$job_storage\", instance=~\"$instance\"}[1d])\n - \n ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job_storage\", instance=~\"$instance\", type=\"merge\"}[1d])\n ) * scalar(\n sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n / \n sum(vm_rows{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n )\n))",
"expr": "min(vm_free_disk_space_bytes{job=~\"$job_storage\", instance=~\"$instance\"} \n/ \nignoring(path) (\n rate(vm_rows_added_to_storage_total{job=~\"$job_storage\", instance=~\"$instance\"}[1d])\n * scalar(\n sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n / \n sum(vm_rows{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n )\n))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -9050,7 +9050,7 @@
"type": "prometheus",
"uid": "$ds"
},
"description": "Shows the approx time needed to reach 100% of disk capacity based on the following params:\n* free disk space;\n* row ingestion rate;\n* dedup rate;\n* compression.\n\nUse this panel for capacity planning in order to estimate the time remaining for running out of the disk space.",
"description": "Shows the approx time needed to reach 100% of disk capacity based on the following params:\n* free disk space;\n* row ingestion rate;\n* compression.\n\nNote: this panel doesn't account for deduplication process.",
"fieldConfig": {
"defaults": {
"color": {
@@ -9139,7 +9139,7 @@
"uid": "$ds"
},
"editorMode": "code",
"expr": "vm_free_disk_space_bytes{job=~\"$job_storage\", instance=~\"$instance\"} \n/ \nignoring(path) (\n (\n rate(vm_rows_added_to_storage_total{job=~\"$job_storage\", instance=~\"$instance\"}[1d])\n - \n ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job_storage\", instance=~\"$instance\", type=\"merge\"}[1d])\n ) * scalar(\n sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n / \n sum(vm_rows{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n )\n)",
"expr": "vm_free_disk_space_bytes{job=~\"$job_storage\", instance=~\"$instance\"} \n/ \nignoring(path) (\n rate(vm_rows_added_to_storage_total{job=~\"$job_storage\", instance=~\"$instance\"}[1d])\n * scalar(\n sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n / \n sum(vm_rows{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n )\n)",
"format": "time_series",
"interval": "",
"intervalFactor": 1,


@@ -4122,7 +4122,7 @@
"type": "prometheus",
"uid": "$ds"
},
"description": "Shows the time needed to reach the 100% of disk capacity based on the following params:\n* free disk space;\n* row ingestion rate;\n* dedup rate;\n* compression.\n\nUse this panel for capacity planning in order to estimate the time remaining for running out of the disk space.\n\n",
"description": "Shows the approx time needed to reach 100% of disk capacity based on the following params:\n* free disk space;\n* row ingestion rate;\n* compression.\n\nNote: this panel doesn't account for deduplication process.",
"fieldConfig": {
"defaults": {
"color": {
@@ -4211,7 +4211,7 @@
"uid": "$ds"
},
"editorMode": "code",
"expr": "vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"} \n/ ignoring(path) (\n (\n rate(vm_rows_added_to_storage_total{job=~\"$job\", instance=~\"$instance\"}[1d]) \n - ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job\", instance=~\"$instance\", type=\"merge\"}[1d])\n ) * scalar(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"}) \n / sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"})\n )\n )",
"expr": "vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"} \n/ ignoring(path) (\n rate(vm_rows_added_to_storage_total{job=~\"$job\", instance=~\"$instance\"}[1d]) \n * scalar(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"}) \n / sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"})\n )\n )",
"format": "time_series",
"hide": false,
"interval": "",


@@ -4858,7 +4858,7 @@
"type": "victoriametrics-datasource",
"uid": "$ds"
},
"description": "Shows the approx time needed to reach 100% of disk capacity for at least one vmstorage node based on the following params:\n* free disk space;\n* row ingestion rate;\n* dedup rate;\n* compression.\n\nUse this panel for capacity planning in order to estimate the time remaining for running out of the disk space.",
"description": "Shows the approx time needed to reach 100% of disk capacity for at least one vmstorage node based on the following params:\n* free disk space;\n* row ingestion rate;\n* compression.\n\nNote: this panel doesn't account for deduplication process.",
"fieldConfig": {
"defaults": {
"color": {
@@ -4953,7 +4953,7 @@
"uid": "$ds"
},
"editorMode": "code",
"expr": "min(vm_free_disk_space_bytes{job=~\"$job_storage\", instance=~\"$instance\"} \n/ \nignoring(path) (\n (\n rate(vm_rows_added_to_storage_total{job=~\"$job_storage\", instance=~\"$instance\"}[1d])\n - \n ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job_storage\", instance=~\"$instance\", type=\"merge\"}[1d])\n ) * scalar(\n sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n / \n sum(vm_rows{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n )\n))",
"expr": "min(vm_free_disk_space_bytes{job=~\"$job_storage\", instance=~\"$instance\"} \n/ \nignoring(path) (\n rate(vm_rows_added_to_storage_total{job=~\"$job_storage\", instance=~\"$instance\"}[1d])\n * scalar(\n sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n / \n sum(vm_rows{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n )\n))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -9051,7 +9051,7 @@
"type": "victoriametrics-datasource",
"uid": "$ds"
},
"description": "Shows the approx time needed to reach 100% of disk capacity based on the following params:\n* free disk space;\n* row ingestion rate;\n* dedup rate;\n* compression.\n\nUse this panel for capacity planning in order to estimate the time remaining for running out of the disk space.",
"description": "Shows the approx time needed to reach 100% of disk capacity based on the following params:\n* free disk space;\n* row ingestion rate;\n* compression.\n\nNote: this panel doesn't account for deduplication process.",
"fieldConfig": {
"defaults": {
"color": {
@@ -9140,7 +9140,7 @@
"uid": "$ds"
},
"editorMode": "code",
"expr": "vm_free_disk_space_bytes{job=~\"$job_storage\", instance=~\"$instance\"} \n/ \nignoring(path) (\n (\n rate(vm_rows_added_to_storage_total{job=~\"$job_storage\", instance=~\"$instance\"}[1d])\n - \n ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job_storage\", instance=~\"$instance\", type=\"merge\"}[1d])\n ) * scalar(\n sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n / \n sum(vm_rows{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n )\n)",
"expr": "vm_free_disk_space_bytes{job=~\"$job_storage\", instance=~\"$instance\"} \n/ \nignoring(path) (\n rate(vm_rows_added_to_storage_total{job=~\"$job_storage\", instance=~\"$instance\"}[1d])\n * scalar(\n sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n / \n sum(vm_rows{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n )\n)",
"format": "time_series",
"interval": "",
"intervalFactor": 1,


@@ -4123,7 +4123,7 @@
"type": "victoriametrics-datasource",
"uid": "$ds"
},
"description": "Shows the time needed to reach the 100% of disk capacity based on the following params:\n* free disk space;\n* row ingestion rate;\n* dedup rate;\n* compression.\n\nUse this panel for capacity planning in order to estimate the time remaining for running out of the disk space.\n\n",
"description": "Shows the approx time needed to reach 100% of disk capacity based on the following params:\n* free disk space;\n* row ingestion rate;\n* compression.\n\nNote: this panel doesn't account for deduplication process.",
"fieldConfig": {
"defaults": {
"color": {
@@ -4212,7 +4212,7 @@
"uid": "$ds"
},
"editorMode": "code",
"expr": "vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"} \n/ ignoring(path) (\n (\n rate(vm_rows_added_to_storage_total{job=~\"$job\", instance=~\"$instance\"}[1d]) \n - ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job\", instance=~\"$instance\", type=\"merge\"}[1d])\n ) * scalar(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"}) \n / sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"})\n )\n )",
"expr": "vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"} \n/ ignoring(path) (\n rate(vm_rows_added_to_storage_total{job=~\"$job\", instance=~\"$instance\"}[1d]) \n * scalar(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"}) \n / sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"})\n )\n )",
"format": "time_series",
"hide": false,
"interval": "",


@@ -7,32 +7,24 @@ and [Grafana](https://grafana.com/).
To start the docker-compose environment, ensure you have Docker installed and running, and that you have access to the Internet.
**All commands should be executed from the root directory of [the repo](https://github.com/VictoriaMetrics/VictoriaMetrics).**
To spin up the environment with VictoriaMetrics components, run one of the following commands:
```
make docker-single-up # start single server VictoriaMetrics
or
make docker-cluster-up # start cluster VictoriaMetrics
```
* [VictoriaMetrics single server](#victoriametrics-single-server)
* [VictoriaMetrics cluster](#victoriametrics-cluster)
* [vmagent](#vmagent)
* [vmauth](#vmauth)
* [vmalert](#vmalert)
* [alertmanager](#alertmanager)
* [Alerts](#alerts)
* [Grafana](#grafana)
* [VictoriaLogs](#victorialogs-server)
To shut down the docker-compose environment, run one of the following commands:
```
make docker-single-down # shutdown single server VictoriaMetrics
or
make docker-cluster-down # shutdown cluster VictoriaMetrics
```
Optionally, an environment with the [VictoriaMetrics Grafana datasource](https://github.com/VictoriaMetrics/grafana-datasource)
can be started with the following commands:
```
make docker-single-vm-datasource-up # start single server
make docker-single-vm-datasource-down # shut down single server
make docker-cluster-vm-datasource-up # start cluster
make docker-cluster-vm-datasource-down # shutdown cluster
```
## VictoriaMetrics single server
To spin up the environment with single-server VictoriaMetrics, run the following command:
```
make docker-single-up
```
VictoriaMetrics will be accessible on the following ports:
* `--graphiteListenAddr=:2003`
@@ -53,9 +45,19 @@ use link [http://localhost:8428/vmui](http://localhost:8428/vmui).
To access `vmalert` use link [http://localhost:8428/vmalert](http://localhost:8428/vmalert/).
To shut down the environment, execute the following command:
```
make docker-single-down
```
## VictoriaMetrics cluster
To spin up the environment with VictoriaMetrics cluster, run the following command:
```
make docker-cluster-up
```
VictoriaMetrics cluster environment consists of `vminsert`, `vmstorage` and `vmselect` components.
`vminsert` exposes port `:8480`, access to `vmselect` components goes through `vmauth` on port `:8427`,
and the rest of the components are available only inside the environment.
@@ -77,6 +79,11 @@ use link [http://localhost:8427/select/0/prometheus/vmui/](http://localhost:8427
To access `vmalert` use link [http://localhost:8427/select/0/prometheus/vmalert/](http://localhost:8427/select/0/prometheus/vmalert/).
To shut down the environment, execute the following command:
```
make docker-cluster-down
```
## vmagent
vmagent is used for scraping and pushing time series to a VictoriaMetrics instance.
@@ -127,9 +134,15 @@ Grafana is provisioned by default with following entities:
Remember to pick `VictoriaMetrics - cluster` datasource when viewing `VictoriaMetrics - cluster` dashboard.
If the environment was started via `docker-single-vm-datasource-up` or `docker-cluster-vm-datasource-up`, then
Grafana will have the [VictoriaMetrics Grafana datasource](https://github.com/VictoriaMetrics/grafana-datasource)
installed by default.
Optionally, an environment with the [VictoriaMetrics Grafana datasource](https://github.com/VictoriaMetrics/grafana-datasource)
can be started with the following commands:
```
make docker-single-vm-datasource-up # start single server
make docker-single-vm-datasource-down # shut down single server
make docker-cluster-vm-datasource-up # start cluster
make docker-cluster-vm-datasource-down # shutdown cluster
```
## Alerts


@@ -13,10 +13,7 @@ groups:
expr: |
vm_free_disk_space_bytes / ignoring(path)
(
(
rate(vm_rows_added_to_storage_total[1d]) -
ignoring(type) rate(vm_deduplicated_samples_total{type="merge"}[1d])
)
rate(vm_rows_added_to_storage_total[1d])
* scalar(
sum(vm_data_size_bytes{type!~"indexdb.*"}) /
sum(vm_rows{type!~"indexdb.*"})


@@ -13,10 +13,7 @@ groups:
expr: |
vm_free_disk_space_bytes / ignoring(path)
(
(
rate(vm_rows_added_to_storage_total[1d]) -
ignoring(type) rate(vm_deduplicated_samples_total{type="merge"}[1d])
)
rate(vm_rows_added_to_storage_total[1d])
* scalar(
sum(vm_data_size_bytes{type!~"indexdb.*"}) /
sum(vm_rows{type!~"indexdb.*"})


@@ -31,17 +31,25 @@ The sandbox cluster installation is running under the constant load generated by
* SECURITY: upgrade Go builder from Go1.21.6 to Go1.21.7. See [the list of issues addressed in Go1.21.7](https://github.com/golang/go/issues?q=milestone%3AGo1.21.7+label%3ACherryPickApproved).
* FEATURE: all VictoriaMetrics components: add support for TLS client certificate verification at `-httpListenAddr` (aka [mTLS](https://en.wikipedia.org/wiki/Mutual_authentication)). See [these docs](https://docs.victoriametrics.com/#mtls-protection). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5458).
* FEATURE: all VictoriaMetrics components: add support for empty command flag values in short array notation. For example, `-remoteWrite.sendTimeout=',20s,'` specifies three `-remoteWrite.sendTimeout` values - the first and the last ones are default values (`30s` in this case), while the second one is `20s`.
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html) and [single-node VictoriaMetrics](https://docs.victoriametrics.com): add support for data ingestion via [DataDog lambda extension](https://docs.datadoghq.com/serverless/libraries_integrations/extension/) aka `/api/beta/sketches` endpoint. See [these docs](https://docs.victoriametrics.com/#how-to-send-data-from-datadog-agent) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3091). Thanks to @AndrewChubatiuk for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5584).
* FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): add `-disableReroutingOnUnavailable` command-line flag, which can be used for reducing resource usage spikes at `vmstorage` nodes during rolling restart. Thanks to @Muxa1L for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5713).
* FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): add `-disableReroutingOnUnavailable` command-line flag to `vminsert`, which can be used for reducing resource usage spikes at `vmstorage` nodes during rolling restart. See [these docs](https://docs.victoriametrics.com/cluster-victoriametrics/#improving-re-routing-performance-during-restart). Thanks to @Muxa1L for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5713).
* FEATURE: add `-search.resetRollupResultCacheOnStartup` command-line flag for resetting [query cache](https://docs.victoriametrics.com/#rollup-result-cache) on startup. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/834).
* FEATURE: [dashboards/vmagent](https://grafana.com/grafana/dashboards/12683): add `Targets scraped/s` stat panel showing the number of targets scraped by the vmagent per second.
* FEATURE: [dashboards/all](https://grafana.com/orgs/victoriametrics): add new panel `CPU spent on GC`. It should help identify cases when too much CPU is spent on garbage collection, and advise users on how this can be addressed.
* FEATURE: [vmalert](https://docs.victoriametrics.com/#vmalert): support filtering alerting and recording rules using `type` parameter in API `/vmalert/groups`, `/rules`, `/vmalert/api/v1/rules` and `/api/v1/rules`. See [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5749) by @victoramsantos.
* FEATURE: [vmbackup](https://docs.victoriametrics.com/vmbackup.html): support client-side TLS configuration for creating and deleting snapshots via `-snapshot.tls*` cmd-line flags. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5724). Thanks to @khushijain21 for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5738).
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly propagate [label filters](https://docs.victoriametrics.com/keyconcepts/#filtering) from multiple arguments passed to [aggregate functions](https://docs.victoriametrics.com/metricsql/#aggregate-functions). For example, `sum({job="foo"}, {job="bar"}) by (job) + a` was improperly optimized to `sum({job="foo"}, {job="bar"}) by (job) + a{job="foo"}` before being executed. This could lead to unexpected results. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5604).
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly handle precision errors when calculating [changes](https://docs.victoriametrics.com/metricsql/#changes), [changes_prometheus](https://docs.victoriametrics.com/metricsql/#changes_prometheus), [increases_over_time](https://docs.victoriametrics.com/metricsql/#increases_over_time) and [resets](https://docs.victoriametrics.com/metricsql/#resets) functions. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/767).
* BUGFIX: all VictoriaMetrics components: consistently return 200 http status code from [`/-/reload` endpoint](https://docs.victoriametrics.com/vmagent/#configuration-update). Previously [single-node VictoriaMetrics](https://docs.victoriametrics.com/) was returning 204 http status code. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5774).
* BUGFIX: properly store [staleness markers](https://docs.victoriametrics.com/vmagent/#prometheus-staleness-markers) for [self-scraped metrics](https://docs.victoriametrics.com/#monitoring) on single-node VictoriaMetrics shutdown. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/943).
* BUGFIX: prevent possible `too big indexBlockSize` panic when samples with too long label values (~64KB) are ingested into VictoriaMetrics.
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix the graph dragging for Firefox and Safari. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5764).
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix handling invalid timezone. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5732).
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix the bug where the select does not open. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5728).
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): clear entered text in select after selecting a value. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5727).
* BUGFIX: [dashboards](https://grafana.com/orgs/victoriametrics): update `Storage full ETA` panels for Single-node and Cluster dashboards to prevent them from showing negative or blank results caused by an increase in deduplicated samples. Deduplicated samples were part of the expression to provide a better estimate for disk usage, but due to the sporadic nature of [deduplication](https://docs.victoriametrics.com/#deduplication) in VictoriaMetrics it produced skewed results instead. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5747).
## [v1.97.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.97.1)


@@ -597,7 +597,15 @@ The cluster works in the following way when some of `vmstorage` nodes are unavai
It is also possible to configure independent replication factor per distinct `vmstorage` groups - see [these docs](#vmstorage-groups-at-vmselect).
`vmselect` doesn't serve partial responses for API handlers returning raw datapoints - [`/api/v1/export*` endpoints](https://docs.victoriametrics.com/#how-to-export-time-series), since users usually expect this data to always be complete.
`vmselect` doesn't serve partial responses for API handlers returning [raw datapoints](https://docs.victoriametrics.com/keyconcepts/#raw-samples),
since users usually expect this data to always be complete. The following handlers return raw samples:
- [`/api/v1/export*` endpoints](https://docs.victoriametrics.com/#how-to-export-time-series)
- [`/api/v1/query`](https://docs.victoriametrics.com/url-examples/#apiv1query) when the `query` contains [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering)
ending with some duration in square brackets. For example, `/api/v1/query?query=up[1h]&time=2024-01-02T03:00:00Z`.
This query returns [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) for [time series](https://docs.victoriametrics.com/keyconcepts/#time-series)
with the `up` name on the time range `(2024-01-02T02:00:00 .. 2024-01-02T03:00:00]`. See [this article](https://valyala.medium.com/analyzing-prometheus-data-with-external-tools-5f3e5e147639)
for details.
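As an illustrative sketch, requesting raw samples via one of these handlers might look like this (the `vmselect` address and the accountID `0` are assumptions):

```
# Export raw samples for the `up` series; vmselect never serves partial responses here
curl 'http://localhost:8481/select/0/prometheus/api/v1/export' -d 'match[]=up'
```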
Data replication can be used for increasing storage durability. See [these docs](#replication-and-data-safety) for details.
@@ -1375,7 +1383,7 @@ Below is the output for `/path/to/vmselect -help`:
-search.denyPartialResponse
Whether to deny partial responses if a part of -storageNode instances fail to perform queries; this trades availability over consistency; see also -search.maxQueryDuration
-search.disableCache
Whether to disable response caching. This may be useful during data backfilling
Whether to disable response caching. This may be useful when ingesting historical data. See https://docs.victoriametrics.com/#backfilling . See also -search.resetRollupResultCacheOnStartup
-search.graphiteMaxPointsPerSeries int
The maximum number of points per series Graphite render API can return (default 1000000)
-search.graphiteStorageStep duration
@@ -1455,6 +1463,8 @@ Below is the output for `/path/to/vmselect -help`:
-search.resetCacheAuthKey value
Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call
Flag value can be read from the given file when using -search.resetCacheAuthKey=file:///abs/path/to/file or -search.resetCacheAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -search.resetCacheAuthKey=http://host/path or -search.resetCacheAuthKey=https://host/path
-search.resetRollupResultCacheOnStartup
Whether to reset rollup result cache on startup. See https://docs.victoriametrics.com/#rollup-result-cache . See also -search.disableCache
-search.setLookbackToStep
Whether to fix lookback interval to 'step' query arg value. If set to true, the query model becomes closer to InfluxDB data model. If set to true, then -search.maxLookback and -search.maxStalenessInterval are ignored
-search.skipSlowReplicas


@@ -1541,7 +1541,7 @@ Set HTTP request header `Content-Encoding: gzip` when sending gzip-compressed da
VictoriaMetrics accepts data in JSON line format at [/api/v1/import](#how-to-import-data-in-json-line-format)
and exports data in this format at [/api/v1/export](#how-to-export-data-in-json-line-format).
The format follows [JSON streaming concept](http://ndjson.org/), i.e. each line contains a JSON object with metrics data in the following format:
The format follows [JSON streaming concept](https://jsonlines.org/), i.e. each line contains a JSON object with metrics data in the following format:
```json
{
@@ -2313,6 +2313,21 @@ Sometimes it is needed to remove such caches on the next startup. This can be do
In this case VictoriaMetrics will automatically remove all the caches on the next start.
See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1447) for details.
It is also possible to remove the [rollup result cache](#rollup-result-cache) on startup by passing the `-search.resetRollupResultCacheOnStartup` command-line flag to VictoriaMetrics.
## Rollup result cache
VictoriaMetrics caches query responses by default. This improves performance for repeated queries
to [`/api/v1/query`](https://docs.victoriametrics.com/keyconcepts/#instant-query) and [`/api/v1/query_range`](https://docs.victoriametrics.com/keyconcepts/#range-query)
with increasing `time`, `start` and `end` query args.
This cache may work incorrectly when ingesting historical data into VictoriaMetrics. See [these docs](#backfilling) for details.
The rollup cache can be disabled either globally by running VictoriaMetrics with the `-search.disableCache` command-line flag
or on a per-query basis by passing `nocache=1` query arg to `/api/v1/query` and `/api/v1/query_range`.
See also [cache removal docs](#cache-removal).
## Cache tuning
VictoriaMetrics uses various in-memory caches for faster data ingestion and query performance.
@@ -2377,11 +2392,12 @@ VictoriaMetrics accepts historical data in arbitrary order of time via [any supp
See [how to backfill data with recording rules in vmalert](https://docs.victoriametrics.com/vmalert.html#rules-backfilling).
Make sure that configured `-retentionPeriod` covers timestamps for the backfilled data.
It is recommended to disable the query cache with the `-search.disableCache` command-line flag when writing
It is recommended to disable the [query cache](#rollup-result-cache) with the `-search.disableCache` command-line flag when writing
historical data with timestamps from the past, since the cache assumes that the data is written with
the current timestamps. The query cache can be re-enabled after the backfilling is complete.
An alternative solution is to query [/internal/resetRollupResultCache](https://docs.victoriametrics.com/url-examples.html#internalresetrollupresultcache) handler after the backfilling is complete. This will reset the query cache, which could contain incomplete data cached during the backfilling.
An alternative solution is to query [/internal/resetRollupResultCache](https://docs.victoriametrics.com/url-examples.html#internalresetrollupresultcache)
after the backfilling is complete. This will reset the [query cache](#rollup-result-cache), which could contain incomplete data cached during the backfilling.
Yet another solution is to increase `-search.cacheTimestampOffset` flag value in order to disable caching
for data with timestamps close to the current time. Single-node VictoriaMetrics automatically resets response
@@ -2725,6 +2741,10 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-metricsAuthKey value
Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
Flag value can be read from the given file when using -metricsAuthKey=file:///abs/path/to/file or -metricsAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -metricsAuthKey=http://host/path or -metricsAuthKey=https://host/path
-mtls
Whether to require valid client certificate for https requests to -httpListenAddr . This flag works only if -tls flag is set. See also -mtlsCAFile . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/enterprise.html
-mtlsCAFile string
Optional path to TLS Root CA for verifying client certificates when -mtls is enabled. By default the host system TLS Root CA is used for client certificate verification. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/enterprise.html
-newrelic.maxInsertRequestSize size
The maximum size in bytes of a single NewRelic request to /newrelic/infra/v2/metrics/events/bulk
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 67108864)
@@ -2882,7 +2902,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-search.disableAutoCacheReset
Whether to disable automatic response cache reset if a sample with timestamp outside -search.cacheTimestampOffset is inserted into VictoriaMetrics
-search.disableCache
Whether to disable response caching. This may be useful during data backfilling
Whether to disable response caching. This may be useful when ingesting historical data. See https://docs.victoriametrics.com/#backfilling . See also -search.resetRollupResultCacheOnStartup
-search.graphiteMaxPointsPerSeries int
The maximum number of points per series Graphite render API can return (default 1000000)
-search.graphiteStorageStep duration
@@ -2966,6 +2986,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-search.resetCacheAuthKey value
Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call
Flag value can be read from the given file when using -search.resetCacheAuthKey=file:///abs/path/to/file or -search.resetCacheAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -search.resetCacheAuthKey=http://host/path or -search.resetCacheAuthKey=https://host/path
-search.resetRollupResultCacheOnStartup
Whether to reset rollup result cache on startup. See https://docs.victoriametrics.com/#rollup-result-cache . See also -search.disableCache
-search.setLookbackToStep
Whether to fix lookback interval to 'step' query arg value. If set to true, the query model becomes closer to InfluxDB data model. If set to true, then -search.maxLookback and -search.maxStalenessInterval are ignored
-search.treatDotsAsIsInRegexps
@@ -3018,7 +3040,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-streamAggr.keepInput
Whether to keep all the input samples after the aggregation with -streamAggr.config. By default, only aggregated samples are dropped, while the remaining samples are stored in the database. See also -streamAggr.dropInput and https://docs.victoriametrics.com/stream-aggregation.html
-tls
Whether to enable TLS for incoming HTTP requests at -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set
Whether to enable TLS for incoming HTTP requests at -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set. See also -mtls
-tlsCertFile string
Path to file with TLS certificate if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower. The provided certificate file is automatically re-read every second, so it can be dynamically updated
-tlsCipherSuites array


@@ -1549,7 +1549,7 @@ Set HTTP request header `Content-Encoding: gzip` when sending gzip-compressed da
VictoriaMetrics accepts data in JSON line format at [/api/v1/import](#how-to-import-data-in-json-line-format)
and exports data in this format at [/api/v1/export](#how-to-export-data-in-json-line-format).
The format follows [JSON streaming concept](http://ndjson.org/), i.e. each line contains a JSON object with metrics data in the following format:
The format follows [JSON streaming concept](https://jsonlines.org/), i.e. each line contains a JSON object with metrics data in the following format:
```json
{
@@ -2321,6 +2321,21 @@ Sometimes it is needed to remove such caches on the next startup. This can be do
In this case VictoriaMetrics will automatically remove all the caches on the next start.
See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1447) for details.
It is also possible to remove the [rollup result cache](#rollup-result-cache) on startup by passing the `-search.resetRollupResultCacheOnStartup` command-line flag to VictoriaMetrics.
## Rollup result cache
VictoriaMetrics caches query responses by default. This improves performance for repeated queries
to [`/api/v1/query`](https://docs.victoriametrics.com/keyconcepts/#instant-query) and [`/api/v1/query_range`](https://docs.victoriametrics.com/keyconcepts/#range-query)
with increasing `time`, `start` and `end` query args.
This cache may work incorrectly when ingesting historical data into VictoriaMetrics. See [these docs](#backfilling) for details.
The rollup cache can be disabled either globally by running VictoriaMetrics with the `-search.disableCache` command-line flag
or on a per-query basis by passing `nocache=1` query arg to `/api/v1/query` and `/api/v1/query_range`.
See also [cache removal docs](#cache-removal).
## Cache tuning
VictoriaMetrics uses various in-memory caches for faster data ingestion and query performance.
@@ -2385,11 +2400,12 @@ VictoriaMetrics accepts historical data in arbitrary order of time via [any supp
See [how to backfill data with recording rules in vmalert](https://docs.victoriametrics.com/vmalert.html#rules-backfilling).
Make sure that configured `-retentionPeriod` covers timestamps for the backfilled data.
It is recommended to disable the query cache with the `-search.disableCache` command-line flag when writing
It is recommended to disable the [query cache](#rollup-result-cache) with the `-search.disableCache` command-line flag when writing
historical data with timestamps from the past, since the cache assumes that the data is written with
the current timestamps. The query cache can be re-enabled after the backfilling is complete.
An alternative solution is to query [/internal/resetRollupResultCache](https://docs.victoriametrics.com/url-examples.html#internalresetrollupresultcache) handler after the backfilling is complete. This will reset the query cache, which could contain incomplete data cached during the backfilling.
An alternative solution is to query [/internal/resetRollupResultCache](https://docs.victoriametrics.com/url-examples.html#internalresetrollupresultcache)
after the backfilling is complete. This will reset the [query cache](#rollup-result-cache), which could contain incomplete data cached during the backfilling.
Yet another solution is to increase `-search.cacheTimestampOffset` flag value in order to disable caching
for data with timestamps close to the current time. Single-node VictoriaMetrics automatically resets response
@@ -2733,6 +2749,10 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-metricsAuthKey value
Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
Flag value can be read from the given file when using -metricsAuthKey=file:///abs/path/to/file or -metricsAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -metricsAuthKey=http://host/path or -metricsAuthKey=https://host/path
-mtls
Whether to require valid client certificate for https requests to -httpListenAddr . This flag works only if -tls flag is set. See also -mtlsCAFile . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/enterprise.html
-mtlsCAFile string
Optional path to TLS Root CA for verifying client certificates when -mtls is enabled. By default the host system TLS Root CA is used for client certificate verification. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/enterprise.html
-newrelic.maxInsertRequestSize size
The maximum size in bytes of a single NewRelic request to /newrelic/infra/v2/metrics/events/bulk
Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 67108864)
@@ -2890,7 +2910,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-search.disableAutoCacheReset
Whether to disable automatic response cache reset if a sample with timestamp outside -search.cacheTimestampOffset is inserted into VictoriaMetrics
-search.disableCache
Whether to disable response caching. This may be useful during data backfilling
Whether to disable response caching. This may be useful when ingesting historical data. See https://docs.victoriametrics.com/#backfilling . See also -search.resetRollupResultCacheOnStartup
-search.graphiteMaxPointsPerSeries int
The maximum number of points per series Graphite render API can return (default 1000000)
-search.graphiteStorageStep duration
@@ -2974,6 +2994,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-search.resetCacheAuthKey value
Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call
Flag value can be read from the given file when using -search.resetCacheAuthKey=file:///abs/path/to/file or -search.resetCacheAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -search.resetCacheAuthKey=http://host/path or -search.resetCacheAuthKey=https://host/path
-search.resetRollupResultCacheOnStartup
Whether to reset rollup result cache on startup. See https://docs.victoriametrics.com/#rollup-result-cache . See also -search.disableCache
-search.setLookbackToStep
Whether to fix lookback interval to 'step' query arg value. If set to true, the query model becomes closer to InfluxDB data model. If set to true, then -search.maxLookback and -search.maxStalenessInterval are ignored
-search.treatDotsAsIsInRegexps
@@ -3026,7 +3048,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-streamAggr.keepInput
Whether to keep all the input samples after the aggregation with -streamAggr.config. By default, only aggregated samples are dropped, while the remaining samples are stored in the database. See also -streamAggr.dropInput and https://docs.victoriametrics.com/stream-aggregation.html
-tls
Whether to enable TLS for incoming HTTP requests at -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set
Whether to enable TLS for incoming HTTP requests at -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set. See also -mtls
-tlsCertFile string
Path to file with TLS certificate if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower. The provided certificate file is automatically re-read every second, so it can be dynamically updated
-tlsCipherSuites array

View file

@ -35,7 +35,7 @@ See also:
VictoriaLogs supports the following data ingestion HTTP APIs:
- Elasticsearch bulk API. See [these docs](#elasticsearch-bulk-api).
- JSON stream API aka [ndjson](http://ndjson.org/). See [these docs](#json-stream-api).
- JSON stream API aka [ndjson](https://jsonlines.org/). See [these docs](#json-stream-api).
- Loki JSON API. See [these docs](#loki-json-api).
VictoriaLogs accepts optional [HTTP parameters](#http-parameters) at data ingestion HTTP APIs.
@ -94,7 +94,7 @@ See also:
### JSON stream API
VictoriaLogs accepts JSON line stream aka [ndjson](http://ndjson.org/) at `http://localhost:9428/insert/jsonline` endpoint.
VictoriaLogs accepts JSON line stream aka [ndjson](https://jsonlines.org/) at `http://localhost:9428/insert/jsonline` endpoint.
The following command pushes multiple log lines to VictoriaLogs:

View file

@ -44,7 +44,7 @@ See [LogsQL docs](https://docs.victoriametrics.com/VictoriaLogs/LogsQL.html) for
The `query` arg must be properly encoded with [percent encoding](https://en.wikipedia.org/wiki/URL_encoding) when passing it to `curl`
or similar tools.
The `/select/logsql/query` endpoint returns [a stream of JSON lines](http://ndjson.org/),
The `/select/logsql/query` endpoint returns [a stream of JSON lines](https://jsonlines.org/),
where each line contains JSON-encoded log entry in the form `{field1="value1",...,fieldN="valueN"}`.
Example response:

View file

@ -16,7 +16,7 @@ aliases:
## Next release
- TODO
- Remove deprecated autoscaling/v2beta1 HPA objects, previously operator still use it for k8s 1.25. See [this issue](https://github.com/VictoriaMetrics/operator/issues/864) for details.
<a name="v0.41.1"></a>
## [v0.41.1](https://github.com/VictoriaMetrics/operator/releases/tag/v0.41.1) - 1 Feb 2024

View file

@ -10,7 +10,7 @@ menu:
<!-- this doc autogenerated - don't edit it manually -->
# Auto Generated vars for package config
updated at Mon Feb 5 11:34:25 UTC 2024
updated at Wed Feb 7 07:43:09 UTC 2024
| variable name | variable default value | variable required | variable description |

View file

@ -28,6 +28,9 @@ The relabeling is mostly used for the following tasks:
* Removing prefixes from target label names. See [how to remove prefixes from target label names](#how-to-remove-prefixes-from-target-label-names).
* Removing some labels from discovered targets. See [how to remove labels from targets](#how-to-remove-labels-from-targets).
* Dropping some metrics during scrape. See [how to drop metrics during scrape](#how-to-drop-metrics-during-scrape).
* Renaming scraped metrics. See [how to rename scraped metrics](#how-to-rename-scraped-metrics).
* Adding labels to scraped metrics. See [how to add labels to scraped metrics](#how-to-add-labels-to-scraped-metrics).
* Changing label values in scraped metrics. See [how to change label values in scraped metrics](#how-to-change-label-values-in-scraped-metrics).
* Removing some labels from scraped metrics. See [how to remove labels from scraped metrics](#how-to-remove-labels-from-scraped-metrics).
* Removing some labels from metrics matching some [series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors).
See [how to remove labels from metrics subset](#how-to-remove-labels-from-metrics-subset).
@ -54,7 +57,125 @@ scrape_configs:
regex: "foo_.*"
```
See also [how to remove labels from scraped metrics](#how-to-remove-labels-from-scraped-metrics).
See also:
- [how to remove labels from scraped metrics](#how-to-remove-labels-from-scraped-metrics)
- [useful tips for metric relabeling](#useful-tips-for-metric-relabeling)
## How to rename scraped metrics
The metric name is a regular label with the special name `__name__` (see [these docs](https://docs.victoriametrics.com/keyconcepts/#labels)).
So renaming a metric is performed in the same way as changing a label value.
Let's look at a few examples.
The following config renames `foo` metric to `bar` across all the scraped metrics, while leaving other metric names as is:
```yaml
scrape_configs:
- job_name: test
static_configs:
- targets: [host123]
metric_relabel_configs:
- if: 'foo'
replacement: bar
target_label: __name__
```
The following config renames metrics starting with `foo_` to metrics starting with `bar_` across all the scraped metrics. For example, `foo_count` is renamed to `bar_count`:
```yaml
scrape_configs:
- job_name: test
static_configs:
- targets: [host123]
metric_relabel_configs:
- source_labels: [__name__]
regex: 'foo_(.*)'
replacement: bar_$1
target_label: __name__
```
The following config replaces all the `-` chars in metric names with `_` chars across all the scraped metrics. For example, `foo-bar-baz` is renamed to `foo_bar_baz`:
```yaml
scrape_configs:
- job_name: test
static_configs:
- targets: [host123]
metric_relabel_configs:
- source_labels: [__name__]
action: replace_all
regex: '-'
replacement: '_'
target_label: __name__
```
See also [useful tips for metric relabeling](#useful-tips-for-metric-relabeling).
## How to add labels to scraped metrics
The following config sets `foo="bar"` [label](https://docs.victoriametrics.com/keyconcepts/#labels) across all the scraped metrics:
```yaml
scrape_configs:
- job_name: test
static_configs:
- targets: [host123]
metric_relabel_configs:
- target_label: foo
replacement: bar
```
The following config sets `foo="bar"` label only for metrics matching `{job=~"my-app-.*",env!="dev"}` [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering):
```yaml
scrape_configs:
- job_name: test
static_configs:
- targets: [host123]
metric_relabel_configs:
- if: '{job=~"my-app-.*",env!="dev"}'
target_label: foo
replacement: bar
```
See also [useful tips for metric relabeling](#useful-tips-for-metric-relabeling).
## How to change label values in scraped metrics
The following config adds `foo_` prefix to all the values of `job` label across all the scraped metrics:
```yaml
scrape_configs:
- job_name: test
static_configs:
- targets: [host123]
metric_relabel_configs:
- source_labels: [job]
target_label: job
replacement: foo_$1
```
The following config adds `foo_` prefix to `job` label values only for metrics
matching `{job=~"my-app-.*",env!="dev"}` [series selector](https://docs.victoriametrics.com/keyconcepts/#filtering):
```yaml
scrape_configs:
- job_name: test
static_configs:
- targets: [host123]
metric_relabel_configs:
- if: '{job=~"my-app-.*",env!="dev"}'
source_labels: [job]
target_label: job
replacement: foo_$1
```
See also [useful tips for metric relabeling](#useful-tips-for-metric-relabeling).
## How to remove labels from scraped metrics

View file

@ -729,7 +729,7 @@ Additionally, the `action: graphite` relabeling rules usually work much faster t
## Relabel debug
`vmagent` and [single-node VictoriaMetrics](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html)
`vmagent` and [single-node VictoriaMetrics](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter)
provide the following tools for debugging target-level and metric-level relabeling:
- Target-level debugging (e.g. `relabel_configs` section at [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs))
@ -751,6 +751,45 @@ provide the following tools for debugging target-level and metric-level relabeli
The link is unavailable if `vmagent` runs with `-promscrape.dropOriginalLabels` command-line flag.
The opened page shows step-by-step results for the actual metric relabeling rules applied to the given target labels.
See also [debugging scrape targets](#debugging-scrape-targets).
## Debugging scrape targets
`vmagent` and [single-node VictoriaMetrics](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter)
provide the following tools for debugging scrape targets:
- `http://vmagent:8429/targets` page, which contains information about all the targets currently being scraped.
This page helps answer the following questions:
- **Why can't some targets be scraped?** The `last error` column contains the reason why the given target cannot be scraped.
You can also click the `endpoint` link in order to open the target url in your browser.
You can also click the `response` link in order to open the target url on behalf of `vmagent`. This may be helpful when `vmagent`
is located in some isolated network.
- **Which labels does the particular target have?** The `labels` column shows per-target labels. These labels are attached to all the metrics
scraped from the given target. You can also click on the target labels in order to see the original labels of the target
before applying the [relabeling](#relabeling). The original labels are unavailable if `vmagent` runs with `-promscrape.dropOriginalLabels` command-line flag.
- **Why does the given target have the given set of labels?** Click the `target` link at the `debug relabeling` column for the particular target in order to see step-by-step
execution of [target relabeling rules](#relabeling) applied to the original labels. This link is unavailable if `vmagent` runs
with `-promscrape.dropOriginalLabels` command-line flag.
- **How are the given metric relabeling rules applied to scraped metrics?** Click the `metrics` link at the `debug relabeling` column
for the particular target in order to see step-by-step execution of [metric relabeling rules](#relabeling) applied to the scraped metrics.
- **How many scrapes of the particular target have failed?** The `errors` column shows this value.
- **How many metrics does the given target expose?** The `samples` column shows the number of metrics scraped from each target during the last scrape.
- **How long does it take to scrape the given target?** The `duration` column shows the last scrape duration for each target.
- **When was the last scrape for the given target?** The `last scrape` column shows the last time the given target was scraped.
- **How many times has the given target been scraped?** The `scrapes` column shows this information.
- **What is the current state of the particular target?** The `state` column shows the current state of the particular target.
- `http://vmagent:8429/service-discovery` page, which contains information about all the [discovered targets](https://docs.victoriametrics.com/sd_configs/).
This page doesn't work if `vmagent` runs with `-promscrape.dropOriginalLabels` command-line flag.
This page helps answer the following questions:
- **Why are some targets dropped during service discovery?** Click the `debug` link at `debug relabeling` on the dropped target in order to see step-by-step
execution of [target relabeling rules](#relabeling) applied to the original labels of the discovered target.
- **Why do some targets contain unexpected labels?** Click the `debug` link at `debug relabeling` on the target in order to see step-by-step
execution of [target relabeling rules](#relabeling) applied to the original labels of the discovered target.
- **What were the original labels before relabeling for a particular target?** The `discovered labels` column contains the original labels per each discovered target.
See also [relabel debug](#relabel-debug).
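For scripted checks of the same information, the targets data can also be fetched programmatically. Below is a minimal sketch, assuming `vmagent` runs on `localhost:8429` and exposes the Prometheus-compatible `/api/v1/targets` endpoint; the struct fields cover only a subset of the response:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

type targetsResponse struct {
	Data struct {
		ActiveTargets []struct {
			ScrapeURL string `json:"scrapeUrl"`
			Health    string `json:"health"`
			LastError string `json:"lastError"`
		} `json:"activeTargets"`
	} `json:"data"`
}

func main() {
	// Query the Prometheus-compatible targets API exposed by vmagent.
	resp, err := http.Get("http://localhost:8429/api/v1/targets")
	if err != nil {
		log.Fatalf("cannot query targets: %s", err)
	}
	defer resp.Body.Close()

	var tr targetsResponse
	if err := json.NewDecoder(resp.Body).Decode(&tr); err != nil {
		log.Fatalf("cannot parse targets response: %s", err)
	}
	// Print per-target state and the reason for the last scrape failure, if any.
	for _, t := range tr.Data.ActiveTargets {
		fmt.Printf("%s state=%s lastError=%q\n", t.ScrapeURL, t.Health, t.LastError)
	}
}
```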
## Prometheus staleness markers
`vmagent` sends [Prometheus staleness markers](https://www.robustperception.io/staleness-and-promql) to `-remoteWrite.url` in the following cases:
@ -1144,7 +1183,11 @@ If you have suggestions for improvements or have found a bug - please open an is
regex: true
```
See also [general troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting.html).
See also:
- [debugging scrape targets](#debugging-scrape-targets)
- [relabel debug](#relabel-debug)
- [general troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting.html)
## Calculating disk space for persistence queue

View file

@ -404,6 +404,10 @@ Run `vmbackup -help` in order to see all the available options:
-metricsAuthKey value
Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
Flag value can be read from the given file when using -metricsAuthKey=file:///abs/path/to/file or -metricsAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -metricsAuthKey=http://host/path or -metricsAuthKey=https://host/path
-mtls
Whether to require valid client certificate for https requests to -httpListenAddr . This flag works only if -tls flag is set. See also -mtlsCAFile . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/enterprise.html
-mtlsCAFile string
Optional path to TLS Root CA for verifying client certificates when -mtls is enabled. By default the host system TLS Root CA is used for client certificate verification. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/enterprise.html
-origin string
Optional origin directory on the remote storage with old backup for server-side copying when performing full backup. This speeds up full backups
-pprofAuthKey value
@ -434,12 +438,22 @@ Run `vmbackup -help` in order to see all the available options:
VictoriaMetrics create snapshot url. When this is given, a snapshot will automatically be created during backup. Example: http://victoriametrics:8428/snapshot/create . There is no need to set -snapshotName if -snapshot.createURL is set
-snapshot.deleteURL string
VictoriaMetrics delete snapshot url. Optional. Will be generated from -snapshot.createURL if not provided. All created snapshots will be automatically deleted. Example: http://victoriametrics:8428/snapshot/delete
-snapshot.tlsCAFile string
Optional path to TLS CA file to use for verifying connections to -snapshotCreateURL. By default, system CA is used
-snapshot.tlsCertFile string
Optional path to client-side TLS certificate file to use when connecting to -snapshotCreateURL
-snapshot.tlsInsecureSkipVerify
Whether to skip tls verification when connecting to -snapshotCreateURL
-snapshot.tlsKeyFile string
Optional path to client-side TLS certificate key to use when connecting to -snapshotCreateURL
-snapshot.tlsServerName string
Optional TLS server name to use for connections to -snapshotCreateURL. By default, the server name from -snapshotCreateURL is used
-snapshotName string
Name for the snapshot to back up. See https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-work-with-snapshots. There is no need to set -snapshotName if -snapshot.createURL is set
-storageDataPath string
Path to VictoriaMetrics data. Must match -storageDataPath from VictoriaMetrics or vmstorage (default "victoria-metrics-data")
-tls
Whether to enable TLS for incoming HTTP requests at -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set
Whether to enable TLS for incoming HTTP requests at -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set. See also -mtls
-tlsCertFile string
Path to file with TLS certificate if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower. The provided certificate file is automatically re-read every second, so it can be dynamically updated
-tlsCipherSuites array

View file

@ -540,6 +540,10 @@ command-line flags:
-metricsAuthKey value
Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
Flag value can be read from the given file when using -metricsAuthKey=file:///abs/path/to/file or -metricsAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -metricsAuthKey=http://host/path or -metricsAuthKey=https://host/path
-mtls
Whether to require valid client certificate for https requests to -httpListenAddr . This flag works only if -tls flag is set. See also -mtlsCAFile . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/enterprise.html
-mtlsCAFile string
Optional path to TLS Root CA for verifying client certificates when -mtls is enabled. By default the host system TLS Root CA is used for client certificate verification. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/enterprise.html
-pprofAuthKey value
Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings
Flag value can be read from the given file when using -pprofAuthKey=file:///abs/path/to/file or -pprofAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -pprofAuthKey=http://host/path or -pprofAuthKey=https://host/path
@ -570,10 +574,20 @@ command-line flags:
VictoriaMetrics create snapshot url. When this is given, a snapshot will automatically be created during backup. Example: http://victoriametrics:8428/snapshot/create
-snapshot.deleteURL string
VictoriaMetrics delete snapshot url. Optional. Will be generated from -snapshot.createURL if not provided. All created snapshots will be automatically deleted. Example: http://victoriametrics:8428/snapshot/delete
-snapshot.tlsCAFile string
Optional path to TLS CA file to use for verifying connections to -snapshotCreateURL. By default, system CA is used
-snapshot.tlsCertFile string
Optional path to client-side TLS certificate file to use when connecting to -snapshotCreateURL
-snapshot.tlsInsecureSkipVerify
Whether to skip tls verification when connecting to -snapshotCreateURL
-snapshot.tlsKeyFile string
Optional path to client-side TLS certificate key to use when connecting to -snapshotCreateURL
-snapshot.tlsServerName string
Optional TLS server name to use for connections to -snapshotCreateURL. By default, the server name from -snapshotCreateURL is used
-storageDataPath string
Path to VictoriaMetrics data. Must match -storageDataPath from VictoriaMetrics or vmstorage (default "victoria-metrics-data")
-tls
Whether to enable TLS for incoming HTTP requests at -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set
Whether to enable TLS for incoming HTTP requests at -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set. See also -mtls
-tlsCertFile string
Path to file with TLS certificate if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower. The provided certificate file is automatically re-read every second, so it can be dynamically updated
-tlsCipherSuites array

View file

@ -434,6 +434,10 @@ Below is the list of configuration flags (it can be viewed by running `./vmgatew
-metricsAuthKey value
Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
Flag value can be read from the given file when using -metricsAuthKey=file:///abs/path/to/file or -metricsAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -metricsAuthKey=http://host/path or -metricsAuthKey=https://host/path
-mtls
Whether to require valid client certificate for https requests to -httpListenAddr . This flag works only if -tls flag is set. See also -mtlsCAFile . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/enterprise.html
-mtlsCAFile string
Optional path to TLS Root CA for verifying client certificates when -mtls is enabled. By default the host system TLS Root CA is used for client certificate verification. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/enterprise.html
-pprofAuthKey value
Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings
Flag value can be read from the given file when using -pprofAuthKey=file:///abs/path/to/file or -pprofAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -pprofAuthKey=http://host/path or -pprofAuthKey=https://host/path
@ -468,7 +472,7 @@ Below is the list of configuration flags (it can be viewed by running `./vmgatew
-remoteRead.disablePathAppend
Whether to disable automatic appending of '/api/v1/query' path to the configured -datasource.url and -remoteRead.url
-tls
Whether to enable TLS for incoming HTTP requests at -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set
Whether to enable TLS for incoming HTTP requests at -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set. See also -mtls
-tlsCertFile string
Path to file with TLS certificate if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower. The provided certificate file is automatically re-read every second, so it can be dynamically updated
-tlsCipherSuites array

View file

@ -189,6 +189,10 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q
-metricsAuthKey value
Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
Flag value can be read from the given file when using -metricsAuthKey=file:///abs/path/to/file or -metricsAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -metricsAuthKey=http://host/path or -metricsAuthKey=https://host/path
-mtls
Whether to require valid client certificate for https requests to -httpListenAddr . This flag works only if -tls flag is set. See also -mtlsCAFile . This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/enterprise.html
-mtlsCAFile string
Optional path to TLS Root CA for verifying client certificates when -mtls is enabled. By default the host system TLS Root CA is used for client certificate verification. This flag is available only in Enterprise binaries. See https://docs.victoriametrics.com/enterprise.html
-pprofAuthKey value
Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings
Flag value can be read from the given file when using -pprofAuthKey=file:///abs/path/to/file or -pprofAuthKey=file://./relative/path/to/file . Flag value can be read from the given http/https url when using -pprofAuthKey=http://host/path or -pprofAuthKey=https://host/path
@ -215,12 +219,22 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q
See https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html
-skipBackupCompleteCheck
Whether to skip checking for 'backup complete' file in -src. This may be useful for restoring from old backups, which were created without 'backup complete' file
-snapshot.tlsCAFile string
Optional path to TLS CA file to use for verifying connections to -snapshotCreateURL. By default, system CA is used
-snapshot.tlsCertFile string
Optional path to client-side TLS certificate file to use when connecting to -snapshotCreateURL
-snapshot.tlsInsecureSkipVerify
Whether to skip tls verification when connecting to -snapshotCreateURL
-snapshot.tlsKeyFile string
Optional path to client-side TLS certificate key to use when connecting to -snapshotCreateURL
-snapshot.tlsServerName string
Optional TLS server name to use for connections to -snapshotCreateURL. By default, the server name from -snapshotCreateURL is used
-src string
Source path with backup on the remote storage. Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://container/path/to/backup or fs:///path/to/local/backup
-storageDataPath string
Destination path where backup must be restored. VictoriaMetrics must be stopped when restoring from backup. -storageDataPath dir can be non-empty. In this case the contents of -storageDataPath dir is synchronized with -src contents, i.e. it works like 'rsync --delete' (default "victoria-metrics-data")
-tls
Whether to enable TLS for incoming HTTP requests at -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set
Whether to enable TLS for incoming HTTP requests at -httpListenAddr (aka https). -tlsCertFile and -tlsKeyFile must be set if -tls is set. See also -mtls
-tlsCertFile string
Path to file with TLS certificate if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower. The provided certificate file is automatically re-read every second, so it can be dynamically updated
-tlsCipherSuites array

View file

@ -21,6 +21,7 @@ func NewArrayString(name, description string) *ArrayString {
func NewArrayDuration(name string, defaultValue time.Duration, description string) *ArrayDuration {
description += fmt.Sprintf(" (default %s)", defaultValue)
description += "\nSupports `array` of values separated by comma or specified via multiple flags."
description += "\nEmpty values are set to default value."
a := &ArrayDuration{
defaultValue: defaultValue,
}
@ -31,6 +32,7 @@ func NewArrayDuration(name string, defaultValue time.Duration, description strin
// NewArrayBool returns new ArrayBool with the given name and description.
func NewArrayBool(name, description string) *ArrayBool {
description += "\nSupports `array` of values separated by comma or specified via multiple flags."
description += "\nEmpty values are set to false."
var a ArrayBool
flag.Var(&a, name, description)
return &a
@ -40,6 +42,7 @@ func NewArrayBool(name, description string) *ArrayBool {
func NewArrayInt(name string, defaultValue int, description string) *ArrayInt {
description += fmt.Sprintf(" (default %d)", defaultValue)
description += "\nSupports `array` of values separated by comma or specified via multiple flags."
description += "\nEmpty values are set to default value."
a := &ArrayInt{
defaultValue: defaultValue,
}
@ -52,6 +55,7 @@ func NewArrayBytes(name string, defaultValue int64, description string) *ArrayBy
description += "\nSupports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB."
description += fmt.Sprintf(" (default %d)", defaultValue)
description += "\nSupports `array` of values separated by comma or specified via multiple flags."
description += "\nEmpty values are set to default value."
a := &ArrayBytes{
defaultValue: defaultValue,
}
@ -241,6 +245,9 @@ func (a *ArrayBool) String() string {
func (a *ArrayBool) Set(value string) error {
values := parseArrayValues(value)
for _, v := range values {
if v == "" {
v = "false"
}
b, err := strconv.ParseBool(v)
if err != nil {
return err
@ -284,6 +291,9 @@ func (a *ArrayDuration) String() string {
func (a *ArrayDuration) Set(value string) error {
values := parseArrayValues(value)
for _, v := range values {
if v == "" {
v = a.defaultValue.String()
}
b, err := time.ParseDuration(v)
if err != nil {
return err
@ -332,6 +342,9 @@ func (a *ArrayInt) String() string {
func (a *ArrayInt) Set(value string) error {
values := parseArrayValues(value)
for _, v := range values {
if v == "" {
v = fmt.Sprintf("%d", a.defaultValue)
}
n, err := strconv.Atoi(v)
if err != nil {
return err
@ -376,6 +389,9 @@ func (a *ArrayBytes) Set(value string) error {
values := parseArrayValues(value)
for _, v := range values {
var b Bytes
if v == "" {
v = fmt.Sprintf("%d", a.defaultValue)
}
if err := b.Set(v); err != nil {
return err
}
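The hunks above make empty items in array flags fall back to the flag's default value. Below is a minimal standalone sketch of that behavior, mirroring the `,,-64,` test case further down; the helper name is hypothetical and this is not the actual `lib/flagutil` API:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseIntArray parses a comma-separated list of ints; empty items
// fall back to the default value def, e.g. "1,,3" becomes [1 def 3].
func parseIntArray(s string, def int) ([]int, error) {
	var result []int
	for _, v := range strings.Split(s, ",") {
		if v == "" {
			result = append(result, def)
			continue
		}
		n, err := strconv.Atoi(v)
		if err != nil {
			return nil, err
		}
		result = append(result, n)
	}
	return result, nil
}

func main() {
	values, err := parseIntArray(",,-64,", 42)
	if err != nil {
		panic(err)
	}
	fmt.Println(values) // [42 42 -64 42]
}
```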

View file

@ -165,6 +165,7 @@ func TestArrayDuration_Set(t *testing.T) {
f := func(s, expectedResult string) {
t.Helper()
var a ArrayDuration
a.defaultValue = 42 * time.Second
if err := a.Set(s); err != nil {
t.Fatalf("unexpected error: %s", err)
}
@ -176,6 +177,7 @@ func TestArrayDuration_Set(t *testing.T) {
f("", "")
f(`1m`, `1m0s`)
f(`5m,1s,1h`, `5m0s,1s,1h0m0s`)
f(`5m,,1h`, `5m0s,42s,1h0m0s`)
}
func TestArrayDuration_GetOptionalArg(t *testing.T) {
@ -238,6 +240,7 @@ func TestArrayBool_Set(t *testing.T) {
f("", "")
f(`true`, `true`)
f(`false,True,False`, `false,true,false`)
f(`1,,False`, `true,false,false`)
}
func TestArrayBool_GetOptionalArg(t *testing.T) {
@ -289,6 +292,7 @@ func TestArrayInt_Set(t *testing.T) {
f := func(s, expectedResult string, expectedValues []int) {
t.Helper()
var a ArrayInt
a.defaultValue = 42
if err := a.Set(s); err != nil {
t.Fatalf("unexpected error: %s", err)
}
@ -304,6 +308,7 @@ func TestArrayInt_Set(t *testing.T) {
f("", "", nil)
f(`1`, `1`, []int{1})
f(`-2,3,-64`, `-2,3,-64`, []int{-2, 3, -64})
f(`,,-64,`, `42,42,-64,42`, []int{42, 42, -64, 42})
}
func TestArrayInt_GetOptionalArg(t *testing.T) {
@ -357,6 +362,7 @@ func TestArrayBytes_Set(t *testing.T) {
f := func(s, expectedResult string) {
t.Helper()
var a ArrayBytes
a.defaultValue = 42
if err := a.Set(s); err != nil {
t.Fatalf("unexpected error: %s", err)
}
@ -368,6 +374,7 @@ func TestArrayBytes_Set(t *testing.T) {
f("", "")
f(`1`, `1`)
f(`-2,3,10kb`, `-2,3,10KB`)
f(`,,10kb`, `42,42,10KB`)
}
func TestArrayBytes_GetOptionalArg(t *testing.T) {

View file

@ -47,6 +47,11 @@ func (b *Bytes) String() string {
// Set implements flag.Value interface
func (b *Bytes) Set(value string) error {
if value == "" {
b.N = 0
b.valueString = ""
return nil
}
value = normalizeBytesString(value)
switch {
case strings.HasSuffix(value, "KB"):

View file

@ -12,7 +12,6 @@ func TestBytesSetFailure(t *testing.T) {
t.Fatalf("expecting non-nil error in b.Set(%q)", value)
}
}
f("")
f("foobar")
f("5foobar")
f("aKB")
@ -39,6 +38,7 @@ func TestBytesSetSuccess(t *testing.T) {
t.Fatalf("unexpected valueString; got %q; want %q", valueString, valueExpected)
}
}
f("", 0)
f("0", 0)
f("1", 1)
f("-1234", -1234)

View file

@ -49,6 +49,11 @@ func (d *Duration) String() string {
// Set implements flag.Value interface
func (d *Duration) Set(value string) error {
if value == "" {
d.msecs = 0
d.valueString = ""
return nil
}
// An attempt to parse value in months.
months, err := strconv.ParseFloat(value, 64)
if err == nil {

View file

@ -14,7 +14,6 @@ func TestDurationSetFailure(t *testing.T) {
t.Fatalf("expecting non-nil error in d.Set(%q)", value)
}
}
f("")
f("foobar")
f("5foobar")
f("ah")
@ -51,6 +50,7 @@ func TestDurationSetSuccess(t *testing.T) {
t.Fatalf("unexpected valueString; got %q; want %q", valueString, valueExpected)
}
}
f("", 0)
f("0", 0)
f("1", msecsPer31Days)
f("123.456", 123.456*msecsPer31Days)

View file

@ -1,4 +1,4 @@
package utils
package httputils
import (
"crypto/tls"

View file

@ -1,4 +1,4 @@
package utils
package httputils
import "testing"

View file

@ -148,12 +148,15 @@ func (bsw *blockStreamWriter) WriteBlock(ib *inmemoryBlock) {
bsw.lensBlockOffset += uint64(bsw.bh.lensBlockSize)
// Write blockHeader
unpackedIndexBlockBufLen := len(bsw.unpackedIndexBlockBuf)
bsw.unpackedIndexBlockBuf = bsw.bh.Marshal(bsw.unpackedIndexBlockBuf)
if len(bsw.unpackedIndexBlockBuf) > maxIndexBlockSize {
bsw.unpackedIndexBlockBuf = bsw.unpackedIndexBlockBuf[:unpackedIndexBlockBufLen]
bsw.flushIndexData()
bsw.unpackedIndexBlockBuf = bsw.bh.Marshal(bsw.unpackedIndexBlockBuf)
}
bsw.bh.Reset()
bsw.mr.blockHeadersCount++
if len(bsw.unpackedIndexBlockBuf) >= maxIndexBlockSize {
bsw.flushIndexData()
}
}
// The maximum size of index block with multiple blockHeaders.
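The change above defers flushing until after the blockHeader is appended: if the append overflows the limit, it is rolled back, the buffered index data is flushed, and the header is re-appended to the emptied buffer. A minimal sketch of this flush-before-overflow pattern, with hypothetical names rather than the actual `lib/mergeset` code:

```go
package main

import "fmt"

// appendWithFlush appends entry to buf; if the result exceeds maxSize,
// the append is rolled back, the buffered data is flushed, and entry
// is appended to the emptied buffer instead.
func appendWithFlush(buf, entry []byte, maxSize int, flush func([]byte)) []byte {
	prevLen := len(buf)
	buf = append(buf, entry...)
	if len(buf) > maxSize {
		buf = buf[:prevLen] // roll back the oversized append
		flush(buf)
		buf = append(buf[:0], entry...)
	}
	return buf
}

func main() {
	flush := func(b []byte) { fmt.Printf("flushing %d bytes\n", len(b)) }
	var buf []byte
	for _, entry := range [][]byte{make([]byte, 3), make([]byte, 3), make([]byte, 3)} {
		buf = appendWithFlush(buf, entry, 5, flush)
	}
	fmt.Printf("%d bytes left in buffer\n", len(buf)) // 3
}
```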

View file

@ -104,17 +104,18 @@ func (ib *inmemoryBlock) Reset() {
}
func (ib *inmemoryBlock) updateCommonPrefixSorted() {
ib.commonPrefix = ib.commonPrefix[:0]
items := ib.items
if len(items) == 0 {
if len(items) <= 1 {
// There is no sense in duplicating a single item or zero items into commonPrefix,
// since this can only increase the blockHeader size without any benefit.
ib.commonPrefix = ib.commonPrefix[:0]
return
}
data := ib.data
cp := items[0].Bytes(data)
if len(items) > 1 {
cpLen := commonPrefixLen(cp, items[len(items)-1].Bytes(data))
cp = cp[:cpLen]
}
cpLen := commonPrefixLen(cp, items[len(items)-1].Bytes(data))
cp = cp[:cpLen]
ib.commonPrefix = append(ib.commonPrefix[:0], cp...)
}
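Since the items are sorted, the prefix shared by the first and the last item is shared by all items in between, so a single `commonPrefixLen` call suffices. A minimal sketch of such a helper follows; it is an assumption about the behavior, not the actual `lib/mergeset` code:

```go
package main

import "fmt"

// commonPrefixLen returns the length of the longest byte prefix
// shared by a and b.
func commonPrefixLen(a, b []byte) int {
	n := 0
	for n < len(a) && n < len(b) && a[n] == b[n] {
		n++
	}
	return n
}

func main() {
	a := []byte("foo_bar")
	b := []byte("foo_baz")
	fmt.Println(commonPrefixLen(a, b)) // 6, i.e. "foo_ba"
}
```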

View file

@ -6,6 +6,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)
type inmemoryPart struct {
@ -84,6 +85,10 @@ func (mp *inmemoryPart) Init(ib *inmemoryBlock) {
bb := inmemoryPartBytePool.Get()
bb.B = mp.bh.Marshal(bb.B[:0])
if len(bb.B) > 3*maxIndexBlockSize {
// marshaled blockHeader can exceed indexBlockSize when firstItem and commonPrefix sizes are close to indexBlockSize
logger.Panicf("BUG: too big index block: %d bytes; mustn't exceed %d bytes", len(bb.B), 3*maxIndexBlockSize)
}
mp.indexData.B = encoding.CompressZSTDLevel(mp.indexData.B[:0], bb.B, compressLevel)
mp.mr.firstItem = append(mp.mr.firstItem[:0], mp.bh.firstItem...)

View file

@ -72,8 +72,11 @@ func (mr *metaindexRow) Unmarshal(src []byte) ([]byte, error) {
if mr.blockHeadersCount <= 0 {
return src, fmt.Errorf("blockHeadersCount must be bigger than 0; got %d", mr.blockHeadersCount)
}
if mr.indexBlockSize > 2*maxIndexBlockSize {
return src, fmt.Errorf("too big indexBlockSize: %d; cannot exceed %d", mr.indexBlockSize, 2*maxIndexBlockSize)
if mr.indexBlockSize > 4*maxIndexBlockSize {
// The index block size can exceed maxIndexBlockSize by up to 4x,
// since it can contain commonPrefix and firstItem in the blockHeader,
// each with a maximum length of maxIndexBlockSize.
return src, fmt.Errorf("too big indexBlockSize: %d; cannot exceed %d", mr.indexBlockSize, 4*maxIndexBlockSize)
}
return src, nil

View file

@ -15,7 +15,7 @@ import (
var idxbCache = blockcache.NewCache(getMaxIndexBlocksCacheSize)
var ibCache = blockcache.NewCache(getMaxInmemoryBlocksCacheSize)
// SetIndexBlocksCacheSize overrides the default size of indexdb/indexBlock cache
// SetIndexBlocksCacheSize overrides the default size of indexdb/indexBlocks cache
func SetIndexBlocksCacheSize(size int) {
maxIndexBlockCacheSize = size
}

View file

@ -244,6 +244,7 @@ func (ris *rawItemsShard) addItems(tb *Table, items [][]byte) [][]byte {
ibs = append(ibs, ib)
continue
}
ris.mu.Unlock()
logger.Panicf("BUG: cannot insert too big item into an empty inmemoryBlock; len(item)=%d; the caller should be responsible for avoiding too big items", len(item))
}
ris.ibs = ibs

View file

@ -33,6 +33,27 @@ func TestTableOpenClose(t *testing.T) {
}
}
func TestTableAddItemsTooLongItem(t *testing.T) {
const path = "TestTableAddItemsTooLongItem"
if err := os.RemoveAll(path); err != nil {
t.Fatalf("cannot remove %q: %s", path, err)
}
var isReadOnly uint32
tb := MustOpenTable(path, nil, nil, &isReadOnly)
func() {
defer func() {
if r := recover(); r == nil {
t.Fatalf("expecting panic")
}
}()
tb.AddItems([][]byte{make([]byte, maxInmemoryBlockSize+1)})
}()
tb.MustClose()
_ = os.RemoveAll(path)
}
func TestTableAddItemsSerial(t *testing.T) {
r := rand.New(rand.NewSource(1))
const path = "TestTableAddItemsSerial"

View file

@ -9,11 +9,11 @@ import (
)
var (
// TODO: @AndrewChubatiuk, please provide a permalink for the original source code where these constants were extracted
epsillon = 1.0 / 128
gamma = 1 + 2*epsillon
gammaLn = math.Log(gamma)
defaultMin = 0.981e-9
// These constants were obtained from https://github.com/DataDog/opentelemetry-mapping-go/blob/48d52eeea60d28da2e14c154a24557c4d290c6e2/pkg/quantile/config.go
eps = 1.0 / 128
gamma = 1 + 2*eps
gammaLn = math.Log1p(2 * eps)
defaultMin = 1e-9
bias = 1 - int(math.Floor(math.Log(defaultMin)/gammaLn))
quantiles = []float64{0.5, 0.75, 0.9, 0.95, 0.99}
)
@ -174,7 +174,7 @@ func (s *Sketch) ToSummary() []*Metric {
timestamp := d.Ts * 1000
points[j] = Point{
Timestamp: timestamp,
Value: d.valueForQuantile(q),
Value: d.quantile(q),
}
sumPoints[j] = Point{
Timestamp: timestamp,
@ -285,7 +285,8 @@ func (d *Dogsketch) unmarshalProtobuf(src []byte) (err error) {
return nil
}
func (d *Dogsketch) valueForQuantile(q float64) float64 {
// This function has been copied from https://github.com/DataDog/opentelemetry-mapping-go/blob/48d52eeea60d28da2e14c154a24557c4d290c6e2/pkg/quantile/sparse.go#L92
func (d *Dogsketch) quantile(q float64) float64 {
switch {
case d.Cnt == 0:
return 0
@ -302,7 +303,7 @@ func (d *Dogsketch) valueForQuantile(q float64) float64 {
return 0
}
rank := q * float64(d.Cnt-1)
rank := math.RoundToEven(q * float64(d.Cnt-1))
cnt := float64(0)
for i, n := range ns {
cnt += float64(n)
@ -312,13 +313,7 @@ func (d *Dogsketch) valueForQuantile(q float64) float64 {
weight := (cnt - rank) / float64(n)
vLow := f64(ks[i])
vHigh := vLow * gamma
switch i {
// TODO: I'm unsure this code is correct. i cannot equal len(ns) in this loop.
// @AndrewChubatiuk, please add a permalink to the original source code, which was used
// for writing this code, in the comments to this function.
case len(ns):
vHigh = d.Max
case 0:
if i == 0 {
vLow = d.Min
}
return vLow*weight + vHigh*(1-weight)
@ -326,17 +321,22 @@ func (d *Dogsketch) valueForQuantile(q float64) float64 {
return d.Max
}
// This function has been copied from https://github.com/DataDog/opentelemetry-mapping-go/blob/48d52eeea60d28da2e14c154a24557c4d290c6e2/pkg/quantile/config.go#L54
func f64(k int32) float64 {
switch {
case k < 0:
return -f64(-k)
// TODO: I'm unsure this logic is correct, since k can be smaller than math.MinInt16 and bigger than math.MaxInt16
// @AndrewChubatiuk, please add a permalink to the original source code, which was used for writing this code.
case k == math.MaxInt16 || k == math.MinInt16:
return math.Inf(int(k))
case k == 0:
// See https://github.com/DataDog/opentelemetry-mapping-go/blob/48d52eeea60d28da2e14c154a24557c4d290c6e2/pkg/quantile/key.go#L14
if k <= -((1 << 15) - 1) {
return math.Inf(-1)
}
if k >= ((1 << 15) - 1) {
return math.Inf(1)
}
if k == 0 {
return 0
}
if k < 0 {
return -f64(-k)
}
exp := float64(int(k) - bias)
return math.Pow(gamma, exp)
}
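For reference, the mapping implemented by `f64` is the standard DDSketch one: a value `v` maps to the key `k = ceil(log_gamma(v)) + bias`, and `f64(k) = gamma^(k-bias)` recovers `v` within the relative accuracy derived from `eps`. A minimal sketch under these assumptions (the upstream code may use a slightly different rounding):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	eps := 1.0 / 128
	gamma := 1 + 2*eps
	gammaLn := math.Log1p(2 * eps)
	defaultMin := 1e-9
	bias := 1 - int(math.Floor(math.Log(defaultMin)/gammaLn))

	v := 13.5
	// key(v): the bucket index for v (assuming ceil rounding).
	k := int(math.Ceil(math.Log(v)/gammaLn)) + bias
	// f64(k): the representative value recovered from the bucket index.
	recovered := math.Pow(gamma, float64(k-bias))
	fmt.Printf("v=%v k=%d recovered=%.4f relative error=%.4f\n",
		v, k, recovered, math.Abs(recovered-v)/v)
}
```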

View file

@ -5,25 +5,27 @@ import (
"testing"
)
func TestPointsForQuantile(t *testing.T) {
func TestDogsketchQuantile(t *testing.T) {
f := func(d *Dogsketch, q float64, vExpected float64) {
t.Helper()
v := d.valueForQuantile(q)
if math.Abs(v-vExpected) > 0.4 {
v := d.quantile(q)
if math.Abs(v-vExpected) > 0.01 {
t.Fatalf("unexpected value; got %v; want %v", v, vExpected)
}
}
sketches := &Dogsketch{
Min: 8.0,
Max: 20.0,
Max: 21.0,
Cnt: 17,
N: []uint32{0x0, 0x0, 0x1, 0x0, 0x1, 0x4, 0x6, 0x1, 0x2, 0x0, 0x1, 0x0, 0x1},
K: []int32{0, 1472, 1473, 1479, 1480, 1503, 1504, 1512, 1513, 1514, 1515, 1531, 1532},
}
f(sketches, 0.1, 8.96)
f(sketches, 0.5, 13.01)
f(sketches, 0.75, 14.96)
f(sketches, 0.9, 14.96)
f(sketches, 0.95, 15.43)
f(sketches, 0.99, 15.43)
f(sketches, 0, 8)
f(sketches, 0.1, 12.91)
f(sketches, 0.5, 13.18)
f(sketches, 0.75, 14.84)
f(sketches, 0.9, 15.19)
f(sketches, 0.95, 15.55)
f(sketches, 0.99, 20.24)
f(sketches, 1, 21)
}

View file

@ -3,6 +3,7 @@ package snapshot
import (
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"net/http"
@ -12,11 +13,20 @@ import (
"sync/atomic"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)
var snapshotNameRegexp = regexp.MustCompile(`^[0-9]{14}-[0-9A-Fa-f]+$`)
var (
tlsInsecureSkipVerify = flag.Bool("snapshot.tlsInsecureSkipVerify", false, "Whether to skip tls verification when connecting to -snapshotCreateURL")
tlsCertFile = flag.String("snapshot.tlsCertFile", "", "Optional path to client-side TLS certificate file to use when connecting to -snapshotCreateURL")
tlsKeyFile = flag.String("snapshot.tlsKeyFile", "", "Optional path to client-side TLS certificate key to use when connecting to -snapshotCreateURL")
tlsCAFile = flag.String("snapshot.tlsCAFile", "", `Optional path to TLS CA file to use for verifying connections to -snapshotCreateURL. By default, system CA is used`)
tlsServerName = flag.String("snapshot.tlsServerName", "", `Optional TLS server name to use for connections to -snapshotCreateURL. By default, the server name from -snapshotCreateURL is used`)
)
type snapshot struct {
Status string `json:"status"`
Snapshot string `json:"snapshot"`
@ -30,7 +40,15 @@ func Create(createSnapshotURL string) (string, error) {
if err != nil {
return "", err
}
resp, err := http.Get(u.String())
// create Transport
tr, err := httputils.Transport(createSnapshotURL, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
if err != nil {
return "", err
}
hc := &http.Client{Transport: tr}
resp, err := hc.Get(u.String())
if err != nil {
return "", err
}
@ -68,7 +86,13 @@ func Delete(deleteSnapshotURL string, snapshotName string) error {
if err != nil {
return err
}
resp, err := http.PostForm(u.String(), formData)
// create Transport
tr, err := httputils.Transport(deleteSnapshotURL, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
if err != nil {
return err
}
hc := &http.Client{Transport: tr}
resp, err := hc.PostForm(u.String(), formData)
if err != nil {
return err
}
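The new `-snapshot.tls*` flags boil down to building an `http.Client` with a customized `tls.Config`. Below is a minimal sketch of the equivalent plain-Go setup, assuming hypothetical file paths `client.crt`, `client.key` and `ca.crt`:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
	"net/http"
	"os"
)

func main() {
	// -snapshot.tlsCertFile / -snapshot.tlsKeyFile: client-side certificate.
	cert, err := tls.LoadX509KeyPair("client.crt", "client.key")
	if err != nil {
		log.Fatalf("cannot load client certificate: %s", err)
	}
	// -snapshot.tlsCAFile: custom root CA instead of the system pool.
	caPEM, err := os.ReadFile("ca.crt")
	if err != nil {
		log.Fatalf("cannot read CA file: %s", err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		log.Fatalf("cannot parse CA file")
	}

	hc := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				Certificates: []tls.Certificate{cert},
				RootCAs:      pool,
				ServerName:   "victoriametrics", // -snapshot.tlsServerName
			},
		},
	}
	resp, err := hc.Get("https://victoriametrics:8428/snapshot/create")
	if err != nil {
		log.Fatalf("cannot create snapshot: %s", err)
	}
	defer resp.Body.Close()
}
```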

View file

@ -143,11 +143,14 @@ func (bsw *blockStreamWriter) WriteExternalBlock(b *Block, ph *partHeader, rowsM
atomic.AddUint64(&timestampsBlocksMerged, 1)
atomic.AddUint64(&timestampsBytesSaved, uint64(len(timestampsData)))
}
indexDataLen := len(bsw.indexData)
bsw.indexData = append(bsw.indexData, headerData...)
bsw.mr.RegisterBlockHeader(&b.bh)
if len(bsw.indexData) >= maxBlockSize {
if len(bsw.indexData) > maxBlockSize {
bsw.indexData = bsw.indexData[:indexDataLen]
bsw.flushIndexData()
bsw.indexData = append(bsw.indexData, headerData...)
}
bsw.mr.RegisterBlockHeader(&b.bh)
if !usePrevTimestamps {
bsw.prevTimestampsData = append(bsw.prevTimestampsData[:0], timestampsData...)
bsw.prevTimestampsBlockOffset = bsw.timestampsBlockOffset