Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

This commit is contained in:
Aliaksandr Valialkin 2021-10-22 19:41:51 +03:00
commit d07d2811d4
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
150 changed files with 5986 additions and 1791 deletions

View file

@ -261,7 +261,7 @@ golangci-lint: install-golangci-lint
golangci-lint run --exclude '(SA4003|SA1019|SA5011):' -D errcheck -D structcheck --timeout 2m
install-golangci-lint:
which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.40.1
which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.42.1
install-wwhrd:
which wwhrd || GO111MODULE=off go get github.com/frapposelli/wwhrd

View file

@ -28,6 +28,7 @@ Enterprise binaries can be downloaded and evaluated for free from [the releases
Case studies:
* [AbiosGaming](https://docs.victoriametrics.com/CaseStudies.html#abiosgaming)
* [adidas](https://docs.victoriametrics.com/CaseStudies.html#adidas)
* [Adsterra](https://docs.victoriametrics.com/CaseStudies.html#adsterra)
* [ARNES](https://docs.victoriametrics.com/CaseStudies.html#arnes)
@ -35,12 +36,16 @@ Case studies:
* [CERN](https://docs.victoriametrics.com/CaseStudies.html#cern)
* [COLOPL](https://docs.victoriametrics.com/CaseStudies.html#colopl)
* [Dreamteam](https://docs.victoriametrics.com/CaseStudies.html#dreamteam)
* [Fly.io](https://docs.victoriametrics.com/CaseStudies.html#flyio)
* [German Research Center for Artificial Intelligence](https://docs.victoriametrics.com/CaseStudies.html#german-research-center-for-artificial-intelligence)
* [Grammarly](https://docs.victoriametrics.com/CaseStudies.html#grammarly)
* [Groove X](https://docs.victoriametrics.com/CaseStudies.html#groove-x)
* [Idealo.de](https://docs.victoriametrics.com/CaseStudies.html#idealode)
* [MHI Vestas Offshore Wind](https://docs.victoriametrics.com/CaseStudies.html#mhi-vestas-offshore-wind)
* [Razorpay](https://docs.victoriametrics.com/CaseStudies.html#razorpay)
* [Percona](https://docs.victoriametrics.com/CaseStudies.html#percona)
* [Sensedia](https://docs.victoriametrics.com/CaseStudies.html#sensedia)
* [Smarkets](https://docs.victoriametrics.com/CaseStudies.html#smarkets)
* [Synthesio](https://docs.victoriametrics.com/CaseStudies.html#synthesio)
* [Wedos.com](https://docs.victoriametrics.com/CaseStudies.html#wedoscom)
* [Wix.com](https://docs.victoriametrics.com/CaseStudies.html#wixcom)
@ -604,6 +609,12 @@ The UI allows exploring query results via graphs and tables. Graphs support scro
* Drag the graph to the left / right in order to move the displayed time range into the past / future.
* Hold `Ctrl` (or `Cmd` on MacOS) and scroll up / down in order to zoom in / out the graph.
Query history can be navigated by holding `Ctrl` (or `Cmd` on MacOS) and pressing `up` or `down` arrows on the keyboard while the cursor is located in the query input field.
When querying [backfilled data](https://docs.victoriametrics.com/#backfilling), it may be useful to disable the response cache by toggling the `Enable cache` checkbox.
See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).
## How to build from sources
@ -1540,6 +1551,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
The maximum number of CPU cores to use for big merges. Default value is used if set to 0
-csvTrimTimestamp duration
Trim timestamps when importing csv data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
-datadog.maxInsertRequestSize size
The maximum size in bytes of a single DataDog POST request to /api/v1/series
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 67108864)
-dedup.minScrapeInterval duration
Leave only the first sample in every time series for each discrete interval equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication for details
-deleteAuthKey string
@ -1705,8 +1719,11 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-promscrape.maxScrapeSize size
The maximum size of scrape response in bytes to process from Prometheus targets. Bigger responses are rejected
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 16777216)
-promscrape.minResponseSizeForStreamParse size
The minimum target response size for automatic switching to stream parsing mode, which can reduce memory usage. See https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 1000000)
-promscrape.noStaleMarkers
Whether to disable sending Prometheus stale markers for metrics when scrape target disappears. This option may reduce memory usage if stale markers aren't needed for your setup. See also https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode
Whether to disable sending Prometheus stale markers for metrics when scrape target disappears. This option may reduce memory usage if stale markers aren't needed for your setup. This option also disables populating the scrape_series_added metric. See https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series
-promscrape.openstackSDCheckInterval duration
Interval for checking for changes in openstack API server. This works only if openstack_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#openstack_sd_config for details (default 30s)
-promscrape.seriesLimitPerTarget int
@ -1718,7 +1735,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-promscrape.suppressScrapeErrors
Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed
-relabelConfig string
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. See https://docs.victoriametrics.com/#relabeling for details
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
-relabelDebug
Whether to log metrics before and after relabeling with -relabelConfig. If the -relabelDebug is enabled, then the metrics aren't sent to storage. This is useful for debugging the relabeling configs
-retentionPeriod value
@ -1795,6 +1812,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
The maximum number of unique series that can be added to the storage during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See also -storage.maxHourlySeries
-storage.maxHourlySeries int
The maximum number of unique series that can be added to the storage during the last hour. Excess series are logged and dropped. This can be useful for limiting series cardinality. See also -storage.maxDailySeries
-storage.minFreeDiskSpaceBytes size
The minimum free disk space at -storageDataPath after which the storage stops accepting new data
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 10000000)
-storageDataPath string
Path to storage data (default "victoria-metrics-data")
-tls

View file

@ -99,7 +99,9 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
{"/vmui", "Web UI"},
{"/targets", "discovered targets list"},
{"/api/v1/targets", "advanced information about discovered targets in JSON format"},
{"/config", "-promscrape.config contents"},
{"/metrics", "available service metrics"},
{"/flags", "command-line flags"},
{"/api/v1/status/tsdb", "tsdb status page"},
{"/api/v1/status/top_queries", "top queries"},
{"/api/v1/status/active_queries", "active queries"},

View file

@ -302,12 +302,14 @@ You can read more about relabeling in the following articles:
* If the scrape target is removed from the list of targets, then stale markers are sent for all the metrics scraped from this target.
* Stale markers are sent for all the scraped metrics on graceful shutdown of `vmagent`.
Prometheus staleness markers aren't sent to `-remoteWrite.url` in [stream parsing mode](#stream-parsing-mode) or if `-promscrape.noStaleMarkers` command-line is set.
Tracking Prometheus staleness markers requires additional memory, since the previous response body must be stored for each scrape target in order to compare it to the current response body. The memory usage may be reduced by passing the `-promscrape.noStaleMarkers` command-line flag to `vmagent`. This disables staleness tracking. It also disables tracking the number of new time series per scrape via the auto-generated `scrape_series_added` metric. See [these docs](https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series) for details.
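For reference, Prometheus encodes a staleness marker as a dedicated NaN bit pattern rather than an ordinary sample value. A minimal Go sketch of producing and detecting such a marker, assuming the `0x7ff0000000000002` pattern used by the Prometheus codebase:

```go
package main

import (
	"fmt"
	"math"
)

// staleNaNBits is the NaN bit pattern Prometheus uses as a staleness marker.
const staleNaNBits uint64 = 0x7ff0000000000002

// staleMarker returns the staleness-marker value.
func staleMarker() float64 { return math.Float64frombits(staleNaNBits) }

// isStaleMarker reports whether v is the staleness marker. Ordinary NaN
// checks cannot tell it apart from other NaNs, so the raw bits are compared.
func isStaleMarker(v float64) bool { return math.Float64bits(v) == staleNaNBits }

func main() {
	m := staleMarker()
	fmt.Println(math.IsNaN(m))    // true: it is still a NaN
	fmt.Println(isStaleMarker(m)) // true: with the stale bit pattern
}
```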
## Stream parsing mode
By default `vmagent` reads the full response from scrape target into memory, then parses it, applies [relabeling](#relabeling) and then pushes the resulting metrics to the configured `-remoteWrite.url`. This mode works good for the majority of cases when the scrape target exposes small number of metrics (e.g. less than 10 thousand). But this mode may take big amounts of memory when the scrape target exposes big number of metrics. In this case it is recommended enabling stream parsing mode. When this mode is enabled, then `vmagent` reads response from scrape target in chunks, then immediately processes every chunk and pushes the processed metrics to remote storage. This allows saving memory when scraping targets that expose millions of metrics. Stream parsing mode may be enabled in the following places:
By default `vmagent` reads the full response body from the scrape target into memory, then parses it, applies [relabeling](#relabeling) and then pushes the resulting metrics to the configured `-remoteWrite.url`. This mode works well for the majority of cases, when the scrape target exposes a small number of metrics (e.g. less than 10 thousand). But it may consume large amounts of memory when the scrape target exposes a big number of metrics. In this case it is recommended to enable stream parsing mode. When this mode is enabled, `vmagent` reads the response from the scrape target in chunks, immediately processes every chunk and pushes the processed metrics to remote storage. This allows saving memory when scraping targets that expose millions of metrics.
Stream parsing mode is automatically enabled for scrape targets returning response bodies with sizes bigger than the `-promscrape.minResponseSizeForStreamParse` command-line flag value. Additionally, the stream parsing mode can be explicitly enabled in the following places:
- Via `-promscrape.streamParse` command-line flag. In this case all the scrape targets defined in the file pointed by `-promscrape.config` are scraped in stream parsing mode.
- Via `stream_parse: true` option at `scrape_configs` section. In this case all the scrape targets defined in this section are scraped in stream parsing mode.
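The memory saving of stream parsing comes from handing samples off in small batches instead of buffering the whole response body. A simplified sketch of the idea (not vmagent's actual implementation; names are illustrative):

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// processStream reads an exposition-format response line by line, handing
// each batch of samples off for relabeling/remote write without ever
// holding the full body in memory.
func processStream(r io.Reader, flush func(lines []string)) error {
	sc := bufio.NewScanner(r)
	batch := make([]string, 0, 64)
	for sc.Scan() {
		batch = append(batch, sc.Text())
		if len(batch) == cap(batch) {
			flush(batch)
			batch = batch[:0]
		}
	}
	if len(batch) > 0 {
		flush(batch)
	}
	return sc.Err()
}

func main() {
	body := strings.NewReader("metric_a 1\nmetric_b 2\n")
	_ = processStream(body, func(lines []string) {
		fmt.Println("flushing", len(lines), "samples")
	})
}
```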
@ -329,7 +331,7 @@ scrape_configs:
'match[]': ['{__name__!=""}']
```
Note that `sample_limit` option doesn't prevent from data push to remote storage if stream parsing is enabled because the parsed data is pushed to remote storage as soon as it is parsed.
Note that `sample_limit` and `series_limit` options cannot be used in stream parsing mode because the parsed data is pushed to remote storage as soon as it is parsed.
## Scraping big number of targets
@ -449,7 +451,8 @@ It may be useful to perform `vmagent` rolling update without any scrape loss.
as `vmagent` establishes at least a single TCP connection per target.
* If `vmagent` uses too big amounts of memory, then the following options can help:
* Enabling stream parsing. See [these docs](#stream-parsing-mode).
* Disabling staleness tracking with `-promscrape.noStaleMarkers` option. See [these docs](#prometheus-staleness-markers).
* Enabling stream parsing mode. See [these docs](#stream-parsing-mode).
* Reducing the number of output queues with `-remoteWrite.queues` command-line option.
* Reducing the amount of RAM vmagent can use for in-memory buffering with the `-memory.allowedPercent` or `-memory.allowedBytes` command-line option. Another option is to reduce memory limits in Docker and/or Kubernetes if `vmagent` runs under these systems.
* Reducing the number of CPU cores vmagent can use by passing `GOMAXPROCS=N` environment variable to `vmagent`, where `N` is the desired limit on CPU cores. Another option is to reduce CPU limits in Docker or Kubernetes if `vmagent` runs under these systems.
@ -706,6 +709,9 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
-csvTrimTimestamp duration
Trim timestamps when importing csv data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
-datadog.maxInsertRequestSize size
The maximum size in bytes of a single DataDog POST request to /api/v1/series
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 67108864)
-dryRun
Whether to check only config files without running vmagent. The following files are checked: -promscrape.config, -remoteWrite.relabelConfig, -remoteWrite.urlRelabelConfig . Unknown config entries are allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse
-enableTCP6
@ -853,8 +859,11 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
-promscrape.maxScrapeSize size
The maximum size of scrape response in bytes to process from Prometheus targets. Bigger responses are rejected
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 16777216)
-promscrape.minResponseSizeForStreamParse size
The minimum target response size for automatic switching to stream parsing mode, which can reduce memory usage. See https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 1000000)
-promscrape.noStaleMarkers
Whether to disable sending Prometheus stale markers for metrics when scrape target disappears. This option may reduce memory usage if stale markers aren't needed for your setup. See also https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode
Whether to disable sending Prometheus stale markers for metrics when scrape target disappears. This option may reduce memory usage if stale markers aren't needed for your setup. This option also disables populating the scrape_series_added metric. See https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series
-promscrape.openstackSDCheckInterval duration
Interval for checking for changes in openstack API server. This works only if openstack_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#openstack_sd_config for details (default 30s)
-promscrape.seriesLimitPerTarget int

View file

@ -159,7 +159,9 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
httpserver.WriteAPIHelp(w, [][2]string{
{"/targets", "discovered targets list"},
{"/api/v1/targets", "advanced information about discovered targets in JSON format"},
{"/config", "-promscrape.config contents"},
{"/metrics", "available service metrics"},
{"/flags", "command-line flags"},
{"/-/reload", "reload configuration"},
})
return true
@ -259,6 +261,11 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
promscrapeTargetsRequests.Inc()
promscrape.WriteHumanReadableTargetsStatus(w, r)
return true
case "/config":
promscrapeConfigRequests.Inc()
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
promscrape.WriteConfigData(w)
return true
case "/api/v1/targets":
promscrapeAPIV1TargetsRequests.Inc()
w.Header().Set("Content-Type", "application/json; charset=utf-8")
@ -427,6 +434,8 @@ var (
promscrapeTargetsRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/targets"}`)
promscrapeAPIV1TargetsRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/api/v1/targets"}`)
promscrapeConfigRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/config"}`)
promscrapeConfigReloadRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/-/reload"}`)
)
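The new endpoint can be fetched like any other vmagent page; a hypothetical client-side check, assuming vmagent listens on its default `:8429` address:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Fetch the effective -promscrape.config contents from the new /config endpoint.
	resp, err := http.Get("http://localhost:8429/config")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	cfg, _ := io.ReadAll(resp.Body)
	fmt.Printf("%s", cfg)
}
```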

View file

@ -351,12 +351,12 @@ See full description for these flags in `./vmalert --help`.
## Monitoring
`vmalert` exports various metrics in Prometheus exposition format at `http://vmalert-host:8880/metrics` page.
We recommend setting up regular scraping of this page either through `vmagent` or by Prometheus so that the exported
metrics may be analyzed later.
Use official [Grafana dashboard](https://grafana.com/grafana/dashboards/14950) for `vmalert` overview.
If you have suggestions for improvements or have found a bug - please open an issue on github or add
a review to the dashboard.
@ -496,6 +496,8 @@ The shortlist of configuration flags is the following:
Optional bearer auth token to use for -remoteRead.url.
-remoteRead.bearerTokenFile string
Optional path to bearer token file to use for -remoteRead.url.
-remoteRead.disablePathAppend
Whether to disable automatic appending of '/api/v1/query' path to the configured -remoteRead.url.
-remoteRead.ignoreRestoreErrors
Whether to ignore errors from remote storage when restoring alerts state on startup. (default true)
-remoteRead.lookback duration
@ -511,7 +513,7 @@ The shortlist of configuration flags is the following:
-remoteRead.tlsServerName string
Optional TLS server name to use for connections to -remoteRead.url. By default the server name from -remoteRead.url is used
-remoteRead.url vmalert
Optional URL to VictoriaMetrics or vmselect that will be used to restore alerts state. This configuration makes sense only if vmalert was configured with `remoteWrite.url` before and has been successfully persisted its state. E.g. http://127.0.0.1:8428
Optional URL to VictoriaMetrics or vmselect that will be used to restore alerts state. This configuration makes sense only if vmalert was configured with `remoteWrite.url` before and has successfully persisted its state. E.g. http://127.0.0.1:8428. See also -remoteRead.disablePathAppend
-remoteWrite.basicAuth.password string
Optional basic auth password for -remoteWrite.url
-remoteWrite.basicAuth.passwordFile string

View file

@ -163,7 +163,13 @@ func (ar *AlertingRule) ExecRange(ctx context.Context, start, end time.Time) ([]
// so the hash key will be consistent on restore
s.SetLabel(k, v)
}
// set additional labels to identify group and rule name
if ar.Name != "" {
s.SetLabel(alertNameLabel, ar.Name)
}
if !*disableAlertGroupLabel && ar.GroupName != "" {
s.SetLabel(alertGroupNameLabel, ar.GroupName)
}
a, err := ar.newAlert(s, time.Time{}, qFn) // initial alert
if err != nil {
return nil, fmt.Errorf("failed to create alert: %s", err)
@ -178,13 +184,11 @@ func (ar *AlertingRule) ExecRange(ctx context.Context, start, end time.Time) ([]
// if alert with For > 0
prevT := time.Time{}
//activeAt := time.Time{}
for i := range s.Values {
at := time.Unix(s.Timestamps[i], 0)
if at.Sub(prevT) > ar.EvalInterval {
// reset to Pending if there are gaps > EvalInterval between DPs
a.State = notifier.StatePending
//activeAt = at
a.Start = at
} else if at.Sub(a.Start) >= ar.For {
a.State = notifier.StateFiring
@ -231,6 +235,14 @@ func (ar *AlertingRule) Exec(ctx context.Context) ([]prompbmarshal.TimeSeries, e
// so the hash key will be consistent on restore
m.SetLabel(k, v)
}
// set additional labels to identify group and rule name
if ar.Name != "" {
m.SetLabel(alertNameLabel, ar.Name)
}
if !*disableAlertGroupLabel && ar.GroupName != "" {
m.SetLabel(alertGroupNameLabel, ar.GroupName)
}
h := hash(m)
if _, ok := updated[h]; ok {
// duplicate may be caused by extra labels
@ -352,11 +364,6 @@ func (ar *AlertingRule) newAlert(m datasource.Metric, start time.Time, qFn notif
Start: start,
Expr: ar.Expr,
}
// label defined here to make override possible by
// time series labels.
if !*disableAlertGroupLabel && ar.GroupName != "" {
a.Labels[alertGroupNameLabel] = ar.GroupName
}
for _, l := range m.Labels {
// drop __name__ to be consistent with Prometheus alerting
if l.Name == "__name__" {
@ -415,7 +422,7 @@ func (ar *AlertingRule) AlertsAPI() []*APIAlert {
}
func (ar *AlertingRule) newAlertAPI(a notifier.Alert) *APIAlert {
return &APIAlert{
aa := &APIAlert{
// encode as strings to avoid rounding
ID: fmt.Sprintf("%d", a.ID),
GroupID: fmt.Sprintf("%d", a.GroupID),
@ -427,8 +434,13 @@ func (ar *AlertingRule) newAlertAPI(a notifier.Alert) *APIAlert {
Annotations: a.Annotations,
State: a.State.String(),
ActiveAt: a.Start,
Restored: a.Restored,
Value: strconv.FormatFloat(a.Value, 'f', -1, 32),
}
if alertURLGeneratorFn != nil {
aa.SourceLink = alertURLGeneratorFn(a)
}
return aa
}
const (
@ -443,43 +455,42 @@ const (
alertStateLabel = "alertstate"
// alertGroupNameLabel defines the label name attached to generated time series.
// attaching this label may be disabled via `-disableAlertgroupLabel` flag.
alertGroupNameLabel = "alertgroup"
)
// alertToTimeSeries converts the given alert with the given timestamp to timeseries
func (ar *AlertingRule) alertToTimeSeries(a *notifier.Alert, timestamp int64) []prompbmarshal.TimeSeries {
var tss []prompbmarshal.TimeSeries
tss = append(tss, alertToTimeSeries(ar.Name, a, timestamp))
tss = append(tss, alertToTimeSeries(a, timestamp))
if ar.For > 0 {
tss = append(tss, alertForToTimeSeries(ar.Name, a, timestamp))
tss = append(tss, alertForToTimeSeries(a, timestamp))
}
return tss
}
func alertToTimeSeries(name string, a *notifier.Alert, timestamp int64) prompbmarshal.TimeSeries {
func alertToTimeSeries(a *notifier.Alert, timestamp int64) prompbmarshal.TimeSeries {
labels := make(map[string]string)
for k, v := range a.Labels {
labels[k] = v
}
labels["__name__"] = alertMetricName
labels[alertNameLabel] = name
labels[alertStateLabel] = a.State.String()
return newTimeSeries([]float64{1}, []int64{timestamp}, labels)
}
// alertForToTimeSeries returns a timeseries that represents
// the state of active alerts, where the value is the time when the alert became active
func alertForToTimeSeries(name string, a *notifier.Alert, timestamp int64) prompbmarshal.TimeSeries {
func alertForToTimeSeries(a *notifier.Alert, timestamp int64) prompbmarshal.TimeSeries {
labels := make(map[string]string)
for k, v := range a.Labels {
labels[k] = v
}
labels["__name__"] = alertForStateMetricName
labels[alertNameLabel] = name
return newTimeSeries([]float64{float64(a.Start.Unix())}, []int64{timestamp}, labels)
}
// Restore restores the state of active alerts basing on previously written timeseries.
// Restore restores the state of active alerts based on previously written time series.
// Restore restores only the Start field. The State field will always be Pending and is
// supposed to be updated on the next Exec, as is the Value field.
// Only rules with For > 0 will be restored.
@ -507,23 +518,13 @@ func (ar *AlertingRule) Restore(ctx context.Context, q datasource.Querier, lookb
}
for _, m := range qMetrics {
labels := m.Labels
m.Labels = make([]datasource.Label, 0)
// drop all extra labels, so hash key will
// be identical to time series received in Exec
for _, l := range labels {
if l.Name == alertNameLabel || l.Name == alertGroupNameLabel {
continue
}
m.Labels = append(m.Labels, l)
}
a, err := ar.newAlert(m, time.Unix(int64(m.Values[0]), 0), qFn)
if err != nil {
return fmt.Errorf("failed to create alert: %w", err)
}
a.ID = hash(m)
a.State = notifier.StatePending
a.Restored = true
ar.alerts[a.ID] = a
logger.Infof("alert %q (%d) restored to state at %v", a.Name, a.ID, a.Start)
}

View file

@ -27,7 +27,6 @@ func TestAlertingRule_ToTimeSeries(t *testing.T) {
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
"__name__": alertMetricName,
alertStateLabel: notifier.StateFiring.String(),
alertNameLabel: "instant",
}),
},
},
@ -41,7 +40,6 @@ func TestAlertingRule_ToTimeSeries(t *testing.T) {
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
"__name__": alertMetricName,
alertStateLabel: notifier.StateFiring.String(),
alertNameLabel: "instant extra labels",
"job": "foo",
"instance": "bar",
}),
@ -57,7 +55,6 @@ func TestAlertingRule_ToTimeSeries(t *testing.T) {
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
"__name__": alertMetricName,
alertStateLabel: notifier.StateFiring.String(),
alertNameLabel: "instant labels override",
}),
},
},
@ -68,13 +65,11 @@ func TestAlertingRule_ToTimeSeries(t *testing.T) {
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
"__name__": alertMetricName,
alertStateLabel: notifier.StateFiring.String(),
alertNameLabel: "for",
}),
newTimeSeries([]float64{float64(timestamp.Add(time.Second).Unix())},
[]int64{timestamp.UnixNano()},
map[string]string{
"__name__": alertForStateMetricName,
alertNameLabel: "for",
"__name__": alertForStateMetricName,
}),
},
},
@ -85,13 +80,11 @@ func TestAlertingRule_ToTimeSeries(t *testing.T) {
newTimeSeries([]float64{1}, []int64{timestamp.UnixNano()}, map[string]string{
"__name__": alertMetricName,
alertStateLabel: notifier.StatePending.String(),
alertNameLabel: "for pending",
}),
newTimeSeries([]float64{float64(timestamp.Add(time.Second).Unix())},
[]int64{timestamp.UnixNano()},
map[string]string{
"__name__": alertForStateMetricName,
alertNameLabel: "for pending",
"__name__": alertForStateMetricName,
}),
},
},
@ -109,23 +102,27 @@ func TestAlertingRule_ToTimeSeries(t *testing.T) {
func TestAlertingRule_Exec(t *testing.T) {
const defaultStep = 5 * time.Millisecond
type testAlert struct {
labels []string
alert *notifier.Alert
}
testCases := []struct {
rule *AlertingRule
steps [][]datasource.Metric
expAlerts map[uint64]*notifier.Alert
expAlerts []testAlert
}{
{
newTestAlertingRule("empty", 0),
[][]datasource.Metric{},
map[uint64]*notifier.Alert{},
nil,
},
{
newTestAlertingRule("empty labels", 0),
[][]datasource.Metric{
{datasource.Metric{Values: []float64{1}, Timestamps: []int64{1}}},
},
map[uint64]*notifier.Alert{
hash(datasource.Metric{}): {State: notifier.StateFiring},
[]testAlert{
{alert: &notifier.Alert{State: notifier.StateFiring}},
},
},
{
@ -133,8 +130,8 @@ func TestAlertingRule_Exec(t *testing.T) {
[][]datasource.Metric{
{metricWithLabels(t, "name", "foo")},
},
map[uint64]*notifier.Alert{
hash(metricWithLabels(t, "name", "foo")): {State: notifier.StateFiring},
[]testAlert{
{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
},
},
{
@ -143,8 +140,8 @@ func TestAlertingRule_Exec(t *testing.T) {
{metricWithLabels(t, "name", "foo")},
{},
},
map[uint64]*notifier.Alert{
hash(metricWithLabels(t, "name", "foo")): {State: notifier.StateInactive},
[]testAlert{
{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
},
},
{
@ -154,8 +151,8 @@ func TestAlertingRule_Exec(t *testing.T) {
{},
{metricWithLabels(t, "name", "foo")},
},
map[uint64]*notifier.Alert{
hash(metricWithLabels(t, "name", "foo")): {State: notifier.StateFiring},
[]testAlert{
{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
},
},
{
@ -166,8 +163,8 @@ func TestAlertingRule_Exec(t *testing.T) {
{metricWithLabels(t, "name", "foo")},
{},
},
map[uint64]*notifier.Alert{
hash(metricWithLabels(t, "name", "foo")): {State: notifier.StateInactive},
[]testAlert{
{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
},
},
{
@ -179,7 +176,7 @@ func TestAlertingRule_Exec(t *testing.T) {
{},
{},
},
map[uint64]*notifier.Alert{},
nil,
},
{
newTestAlertingRule("single-firing=>inactive=>firing=>inactive=>empty=>firing", 0),
@ -191,8 +188,8 @@ func TestAlertingRule_Exec(t *testing.T) {
{},
{metricWithLabels(t, "name", "foo")},
},
map[uint64]*notifier.Alert{
hash(metricWithLabels(t, "name", "foo")): {State: notifier.StateFiring},
[]testAlert{
{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
},
},
{
@ -204,10 +201,10 @@ func TestAlertingRule_Exec(t *testing.T) {
metricWithLabels(t, "name", "foo2"),
},
},
map[uint64]*notifier.Alert{
hash(metricWithLabels(t, "name", "foo")): {State: notifier.StateFiring},
hash(metricWithLabels(t, "name", "foo1")): {State: notifier.StateFiring},
hash(metricWithLabels(t, "name", "foo2")): {State: notifier.StateFiring},
[]testAlert{
{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
{labels: []string{"name", "foo1"}, alert: &notifier.Alert{State: notifier.StateFiring}},
{labels: []string{"name", "foo2"}, alert: &notifier.Alert{State: notifier.StateFiring}},
},
},
{
@ -220,9 +217,9 @@ func TestAlertingRule_Exec(t *testing.T) {
// 1: fire first alert
// 2: fire second alert, set first inactive
// 3: fire third alert, set second inactive, delete first one
map[uint64]*notifier.Alert{
hash(metricWithLabels(t, "name", "foo1")): {State: notifier.StateInactive},
hash(metricWithLabels(t, "name", "foo2")): {State: notifier.StateFiring},
[]testAlert{
{labels: []string{"name", "foo1"}, alert: &notifier.Alert{State: notifier.StateInactive}},
{labels: []string{"name", "foo2"}, alert: &notifier.Alert{State: notifier.StateFiring}},
},
},
{
@ -230,8 +227,8 @@ func TestAlertingRule_Exec(t *testing.T) {
[][]datasource.Metric{
{metricWithLabels(t, "name", "foo")},
},
map[uint64]*notifier.Alert{
hash(metricWithLabels(t, "name", "foo")): {State: notifier.StatePending},
[]testAlert{
{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}},
},
},
{
@ -240,8 +237,8 @@ func TestAlertingRule_Exec(t *testing.T) {
{metricWithLabels(t, "name", "foo")},
{metricWithLabels(t, "name", "foo")},
},
map[uint64]*notifier.Alert{
hash(metricWithLabels(t, "name", "foo")): {State: notifier.StateFiring},
[]testAlert{
{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
},
},
{
@ -252,7 +249,7 @@ func TestAlertingRule_Exec(t *testing.T) {
// empty step to reset and delete pending alerts
{},
},
map[uint64]*notifier.Alert{},
nil,
},
{
newTestAlertingRule("for-pending=>firing=>inactive", defaultStep),
@ -262,8 +259,8 @@ func TestAlertingRule_Exec(t *testing.T) {
// empty step to reset pending alerts
{},
},
map[uint64]*notifier.Alert{
hash(metricWithLabels(t, "name", "foo")): {State: notifier.StateInactive},
[]testAlert{
{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
},
},
{
@ -275,8 +272,8 @@ func TestAlertingRule_Exec(t *testing.T) {
{},
{metricWithLabels(t, "name", "foo")},
},
map[uint64]*notifier.Alert{
hash(metricWithLabels(t, "name", "foo")): {State: notifier.StatePending},
[]testAlert{
{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}},
},
},
{
@ -289,8 +286,8 @@ func TestAlertingRule_Exec(t *testing.T) {
{metricWithLabels(t, "name", "foo")},
{metricWithLabels(t, "name", "foo")},
},
map[uint64]*notifier.Alert{
hash(metricWithLabels(t, "name", "foo")): {State: notifier.StateFiring},
[]testAlert{
{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
},
},
}
@ -312,7 +309,15 @@ func TestAlertingRule_Exec(t *testing.T) {
if len(tc.rule.alerts) != len(tc.expAlerts) {
t.Fatalf("expected %d alerts; got %d", len(tc.expAlerts), len(tc.rule.alerts))
}
for key, exp := range tc.expAlerts {
expAlerts := make(map[uint64]*notifier.Alert)
for _, ta := range tc.expAlerts {
labels := ta.labels
labels = append(labels, alertNameLabel)
labels = append(labels, tc.rule.Name)
h := hash(metricWithLabels(t, labels...))
expAlerts[h] = ta.alert
}
for key, exp := range expAlerts {
got, ok := tc.rule.alerts[key]
if !ok {
t.Fatalf("expected to have key %d", key)
@ -468,6 +473,11 @@ func TestAlertingRule_ExecRange(t *testing.T) {
var j int
for _, series := range tc.data {
for _, timestamp := range series.Timestamps {
a := tc.expAlerts[j]
if a.Labels == nil {
a.Labels = make(map[string]string)
}
a.Labels[alertNameLabel] = tc.rule.Name
expTS = append(expTS, tc.rule.alertToTimeSeries(tc.expAlerts[j], timestamp)...)
j++
}
@ -496,7 +506,6 @@ func TestAlertingRule_Restore(t *testing.T) {
[]datasource.Metric{
metricWithValueAndLabels(t, float64(time.Now().Truncate(time.Hour).Unix()),
"__name__", alertForStateMetricName,
alertNameLabel, "",
),
},
map[uint64]*notifier.Alert{
@ -509,7 +518,7 @@ func TestAlertingRule_Restore(t *testing.T) {
[]datasource.Metric{
metricWithValueAndLabels(t, float64(time.Now().Truncate(time.Hour).Unix()),
"__name__", alertForStateMetricName,
alertNameLabel, "",
alertNameLabel, "metric labels",
alertGroupNameLabel, "groupID",
"foo", "bar",
"namespace", "baz",
@ -517,6 +526,8 @@ func TestAlertingRule_Restore(t *testing.T) {
},
map[uint64]*notifier.Alert{
hash(metricWithLabels(t,
alertNameLabel, "metric labels",
alertGroupNameLabel, "groupID",
"foo", "bar",
"namespace", "baz",
)): {State: notifier.StatePending,
@ -528,7 +539,6 @@ func TestAlertingRule_Restore(t *testing.T) {
[]datasource.Metric{
metricWithValueAndLabels(t, float64(time.Now().Truncate(time.Hour).Unix()),
"__name__", alertForStateMetricName,
alertNameLabel, "",
"foo", "bar",
"namespace", "baz",
// extra labels set by rule
@ -645,18 +655,20 @@ func TestAlertingRule_Template(t *testing.T) {
metricWithValueAndLabels(t, 1, "instance", "bar"),
},
map[uint64]*notifier.Alert{
hash(metricWithLabels(t, "region", "east", "instance", "foo")): {
hash(metricWithLabels(t, alertNameLabel, "common", "region", "east", "instance", "foo")): {
Annotations: map[string]string{},
Labels: map[string]string{
"region": "east",
"instance": "foo",
alertNameLabel: "common",
"region": "east",
"instance": "foo",
},
},
hash(metricWithLabels(t, "region", "east", "instance", "bar")): {
hash(metricWithLabels(t, alertNameLabel, "common", "region", "east", "instance", "bar")): {
Annotations: map[string]string{},
Labels: map[string]string{
"region": "east",
"instance": "bar",
alertNameLabel: "common",
"region": "east",
"instance": "bar",
},
},
},
@ -679,20 +691,22 @@ func TestAlertingRule_Template(t *testing.T) {
metricWithValueAndLabels(t, 10, "instance", "bar"),
},
map[uint64]*notifier.Alert{
hash(metricWithLabels(t, "region", "east", "instance", "foo")): {
hash(metricWithLabels(t, alertNameLabel, "override label", "region", "east", "instance", "foo")): {
Labels: map[string]string{
"instance": "foo",
"region": "east",
alertNameLabel: "override label",
"instance": "foo",
"region": "east",
},
Annotations: map[string]string{
"summary": `Too high connection number for "foo" for region east`,
"description": `It is 2 connections for "foo"`,
},
},
hash(metricWithLabels(t, "region", "east", "instance", "bar")): {
hash(metricWithLabels(t, alertNameLabel, "override label", "region", "east", "instance", "bar")): {
Labels: map[string]string{
"instance": "bar",
"region": "east",
alertNameLabel: "override label",
"instance": "bar",
"region": "east",
},
Annotations: map[string]string{
"summary": `Too high connection number for "bar" for region east`,

View file

@ -52,6 +52,9 @@ func Init(extraParams []Param) (QuerierBuilder, error) {
return nil, fmt.Errorf("failed to create transport: %w", err)
}
tr.MaxIdleConnsPerHost = *maxIdleConnections
if tr.MaxIdleConns != 0 && tr.MaxIdleConns < tr.MaxIdleConnsPerHost {
tr.MaxIdleConns = tr.MaxIdleConnsPerHost
}
if *roundDigits > 0 {
extraParams = append(extraParams, Param{
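The `MaxIdleConns` adjustment above matters because `net/http` caps idle connections globally via `Transport.MaxIdleConns`; if that cap is lower than `MaxIdleConnsPerHost`, per-host keep-alive connections get closed prematurely. A minimal standalone sketch of the same tuning:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	tr := http.DefaultTransport.(*http.Transport).Clone()
	tr.MaxIdleConnsPerHost = 100
	// Without this adjustment a global cap lower than the per-host value
	// (DefaultTransport uses 100, but a custom transport could set less)
	// would silently shrink the per-host pool.
	if tr.MaxIdleConns != 0 && tr.MaxIdleConns < tr.MaxIdleConnsPerHost {
		tr.MaxIdleConns = tr.MaxIdleConnsPerHost
	}
	fmt.Println(tr.MaxIdleConns, tr.MaxIdleConnsPerHost)
}
```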

View file

@ -24,18 +24,20 @@ type VMStorage struct {
evaluationInterval time.Duration
extraLabels []string
extraParams []Param
disablePathAppend bool
}
// Clone makes clone of VMStorage, shares http client.
func (s *VMStorage) Clone() *VMStorage {
return &VMStorage{
c: s.c,
authCfg: s.authCfg,
datasourceURL: s.datasourceURL,
lookBack: s.lookBack,
queryStep: s.queryStep,
appendTypePrefix: s.appendTypePrefix,
dataSourceType: s.dataSourceType,
c: s.c,
authCfg: s.authCfg,
datasourceURL: s.datasourceURL,
lookBack: s.lookBack,
queryStep: s.queryStep,
appendTypePrefix: s.appendTypePrefix,
dataSourceType: s.dataSourceType,
disablePathAppend: s.disablePathAppend,
}
}
@ -57,15 +59,16 @@ func (s *VMStorage) BuildWithParams(params QuerierParams) Querier {
}
// NewVMStorage is a constructor for VMStorage
func NewVMStorage(baseURL string, authCfg *promauth.Config, lookBack time.Duration, queryStep time.Duration, appendTypePrefix bool, c *http.Client) *VMStorage {
func NewVMStorage(baseURL string, authCfg *promauth.Config, lookBack time.Duration, queryStep time.Duration, appendTypePrefix bool, c *http.Client, disablePathAppend bool) *VMStorage {
return &VMStorage{
c: c,
authCfg: authCfg,
datasourceURL: strings.TrimSuffix(baseURL, "/"),
appendTypePrefix: appendTypePrefix,
lookBack: lookBack,
queryStep: queryStep,
dataSourceType: NewPrometheusType(),
c: c,
authCfg: authCfg,
datasourceURL: strings.TrimSuffix(baseURL, "/"),
appendTypePrefix: appendTypePrefix,
lookBack: lookBack,
queryStep: queryStep,
dataSourceType: NewPrometheusType(),
disablePathAppend: disablePathAppend,
}
}
@ -132,12 +135,12 @@ func (s *VMStorage) QueryRange(ctx context.Context, query string, start, end tim
func (s *VMStorage) do(ctx context.Context, req *http.Request) (*http.Response, error) {
resp, err := s.c.Do(req.WithContext(ctx))
if err != nil {
return nil, fmt.Errorf("error getting response from %s: %w", req.URL, err)
return nil, fmt.Errorf("error getting response from %s: %w", req.URL.Redacted(), err)
}
if resp.StatusCode != http.StatusOK {
body, _ := ioutil.ReadAll(resp.Body)
_ = resp.Body.Close()
return nil, fmt.Errorf("unexpected response code %d for %s. Response body %s", resp.StatusCode, req.URL, body)
return nil, fmt.Errorf("unexpected response code %d for %s. Response body %s", resp.StatusCode, req.URL.Redacted(), body)
}
return resp, nil
}
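`(*url.URL).Redacted` is a stdlib method (Go 1.15+) that masks the userinfo password, which keeps credentials out of the error messages above and out of logs:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, _ := url.Parse("https://user:secret@victoria:8428/api/v1/query")
	fmt.Println(u.String())   // https://user:secret@victoria:8428/api/v1/query
	fmt.Println(u.Redacted()) // https://user:xxxxx@victoria:8428/api/v1/query
}
```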

View file

@ -38,7 +38,7 @@ func (r graphiteResponse) metrics() []Metric {
func parseGraphiteResponse(req *http.Request, resp *http.Response) ([]Metric, error) {
r := &graphiteResponse{}
if err := json.NewDecoder(resp.Body).Decode(r); err != nil {
return nil, fmt.Errorf("error parsing graphite metrics for %s: %w", req.URL, err)
return nil, fmt.Errorf("error parsing graphite metrics for %s: %w", req.URL.Redacted(), err)
}
return r.metrics(), nil
}

View file

@ -82,10 +82,10 @@ const (
func parsePrometheusResponse(req *http.Request, resp *http.Response) ([]Metric, error) {
r := &promResponse{}
if err := json.NewDecoder(resp.Body).Decode(r); err != nil {
return nil, fmt.Errorf("error parsing prometheus metrics for %s: %w", req.URL, err)
return nil, fmt.Errorf("error parsing prometheus metrics for %s: %w", req.URL.Redacted(), err)
}
if r.Status == statusError {
return nil, fmt.Errorf("response error, query: %s, errorType: %s, error: %s", req.URL, r.ErrorType, r.Error)
return nil, fmt.Errorf("response error, query: %s, errorType: %s, error: %s", req.URL.Redacted(), r.ErrorType, r.Error)
}
if r.Status != statusSuccess {
return nil, fmt.Errorf("unknown status: %s, Expected success or error ", r.Status)
@ -118,7 +118,9 @@ func (s *VMStorage) setPrometheusInstantReqParams(r *http.Request, query string,
if s.appendTypePrefix {
r.URL.Path += prometheusPrefix
}
r.URL.Path += prometheusInstantPath
if !s.disablePathAppend {
r.URL.Path += prometheusInstantPath
}
q := r.URL.Query()
if s.lookBack > 0 {
timestamp = timestamp.Add(-s.lookBack)
@ -136,7 +138,9 @@ func (s *VMStorage) setPrometheusRangeReqParams(r *http.Request, query string, s
if s.appendTypePrefix {
r.URL.Path += prometheusPrefix
}
r.URL.Path += prometheusRangePath
if !s.disablePathAppend {
r.URL.Path += prometheusRangePath
}
q := r.URL.Query()
q.Add("start", fmt.Sprintf("%d", start.Unix()))
q.Add("end", fmt.Sprintf("%d", end.Unix()))
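Put together, the request path depends on the two knobs above. A small sketch of the resulting combinations, assuming the `prometheusPrefix` and `prometheusInstantPath` constant values of `/prometheus` and `/api/v1/query` implied by the flag description:

```go
package main

import "fmt"

// buildPath mirrors the request-path logic above: the datasource type
// prefix and the query suffix are appended independently of each other.
func buildPath(base string, appendTypePrefix, disablePathAppend bool) string {
	p := base
	if appendTypePrefix {
		p += "/prometheus" // assumed value of prometheusPrefix
	}
	if !disablePathAppend {
		p += "/api/v1/query" // assumed value of prometheusInstantPath
	}
	return p
}

func main() {
	fmt.Println(buildPath("", false, false)) // /api/v1/query
	fmt.Println(buildPath("", true, true))   // /prometheus
	fmt.Println(buildPath("", false, true))  // "" - the URL is used as-is
}
```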

View file

@ -83,7 +83,7 @@ func TestVMInstantQuery(t *testing.T) {
if err != nil {
t.Fatalf("unexpected: %s", err)
}
s := NewVMStorage(srv.URL, authCfg, time.Minute, 0, false, srv.Client())
s := NewVMStorage(srv.URL, authCfg, time.Minute, 0, false, srv.Client(), false)
p := NewPrometheusType()
pq := s.BuildWithParams(QuerierParams{DataSourceType: &p, EvaluationInterval: 15 * time.Second})
@ -193,7 +193,7 @@ func TestVMRangeQuery(t *testing.T) {
if err != nil {
t.Fatalf("unexpected: %s", err)
}
s := NewVMStorage(srv.URL, authCfg, time.Minute, 0, false, srv.Client())
s := NewVMStorage(srv.URL, authCfg, time.Minute, 0, false, srv.Client(), false)
p := NewPrometheusType()
pq := s.BuildWithParams(QuerierParams{DataSourceType: &p, EvaluationInterval: 15 * time.Second})
@ -252,6 +252,17 @@ func TestRequestParams(t *testing.T) {
checkEqualString(t, prometheusInstantPath, r.URL.Path)
},
},
{
"prometheus path with disablePathAppend",
false,
&VMStorage{
dataSourceType: NewPrometheusType(),
disablePathAppend: true,
},
func(t *testing.T, r *http.Request) {
checkEqualString(t, "", r.URL.Path)
},
},
{
"prometheus prefix",
false,
@ -263,6 +274,18 @@ func TestRequestParams(t *testing.T) {
checkEqualString(t, prometheusPrefix+prometheusInstantPath, r.URL.Path)
},
},
{
"prometheus prefix with disablePathAppend",
false,
&VMStorage{
dataSourceType: NewPrometheusType(),
appendTypePrefix: true,
disablePathAppend: true,
},
func(t *testing.T, r *http.Request) {
checkEqualString(t, prometheusPrefix, r.URL.Path)
},
},
{
"prometheus range path",
true,
@ -273,6 +296,17 @@ func TestRequestParams(t *testing.T) {
checkEqualString(t, prometheusRangePath, r.URL.Path)
},
},
{
"prometheus range path with disablePathAppend",
true,
&VMStorage{
dataSourceType: NewPrometheusType(),
disablePathAppend: true,
},
func(t *testing.T, r *http.Request) {
checkEqualString(t, "", r.URL.Path)
},
},
{
"prometheus range prefix",
true,
@ -284,6 +318,18 @@ func TestRequestParams(t *testing.T) {
checkEqualString(t, prometheusPrefix+prometheusRangePath, r.URL.Path)
},
},
{
"prometheus range prefix with disablePathAppend",
true,
&VMStorage{
dataSourceType: NewPrometheusType(),
appendTypePrefix: true,
disablePathAppend: true,
},
func(t *testing.T, r *http.Request) {
checkEqualString(t, prometheusPrefix, r.URL.Path)
},
},
{
"graphite path",
false,

View file

@ -123,6 +123,9 @@ func (g *Group) newRule(qb datasource.QuerierBuilder, rule config.Rule) Rule {
// ID return unique group ID that consists of
// rules file and group name
func (g *Group) ID() uint64 {
g.mu.RLock()
defer g.mu.RUnlock()
hash := fnv.New64a()
hash.Write([]byte(g.File))
hash.Write([]byte("\xff"))
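The trailing `\xff` write acts as a field separator, so distinct (file, name) pairs cannot collide by concatenation; the new read-lock guards against the group fields being mutated concurrently on config reload. A standalone sketch of the same hashing pattern:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// idOf hashes the parts with a \xff separator so that ("ab","c") and
// ("a","bc") yield different IDs, matching the pattern used by Group.ID.
func idOf(parts ...string) uint64 {
	h := fnv.New64a()
	for _, p := range parts {
		h.Write([]byte(p))
		h.Write([]byte("\xff"))
	}
	return h.Sum64()
}

func main() {
	fmt.Println(idOf("rules.yml", "my-group"))
	fmt.Println(idOf("ab", "c") == idOf("a", "bc")) // false, thanks to the separator
}
```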

View file

@ -192,7 +192,14 @@ func TestGroupStart(t *testing.T) {
// add rule labels - see config/testdata/rules1-good.rules
alert1.Labels["label"] = "bar"
alert1.Labels["host"] = inst1
alert1.ID = hash(m1)
// add service labels
alert1.Labels[alertNameLabel] = alert1.Name
alert1.Labels[alertGroupNameLabel] = g.Name
var labels1 []string
for k, v := range alert1.Labels {
labels1 = append(labels1, k, v)
}
alert1.ID = hash(metricWithLabels(t, labels1...))
alert2, err := r.newAlert(m2, time.Now(), nil)
if err != nil {
@ -204,7 +211,14 @@ func TestGroupStart(t *testing.T) {
// add rule labels - see config/testdata/rules1-good.rules
alert2.Labels["label"] = "bar"
alert2.Labels["host"] = inst2
alert2.ID = hash(m2)
// add service labels
alert2.Labels[alertNameLabel] = alert2.Name
alert2.Labels[alertGroupNameLabel] = g.Name
var labels2 []string
for k, v := range alert2.Labels {
labels2 = append(labels2, k, v)
}
alert2.ID = hash(metricWithLabels(t, labels2...))
finished := make(chan struct{})
fs.add(m1)

View file

@ -205,7 +205,8 @@ func compareTimeSeries(t *testing.T, a, b []prompbmarshal.TimeSeries) error {
}*/
}
if len(expTS.Labels) != len(gotTS.Labels) {
return fmt.Errorf("expected number of labels %d; got %d", len(expTS.Labels), len(gotTS.Labels))
return fmt.Errorf("expected number of labels %d (%v); got %d (%v)",
len(expTS.Labels), expTS.Labels, len(gotTS.Labels), gotTS.Labels)
}
for i, exp := range expTS.Labels {
got := gotTS.Labels[i]

View file

@ -59,6 +59,8 @@ eg. 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{
dryRun = flag.Bool("dryRun", false, "Whether to check only config files without running vmalert. The rules files are validated. The `-rule` flag must be specified.")
)
var alertURLGeneratorFn notifier.AlertURLGenerator
func main() {
// Write flags and help message to stdout, since it is easier to grep or pipe.
flag.CommandLine.SetOutput(os.Stdout)
@ -79,15 +81,22 @@ func main() {
}
return
}
eu, err := getExternalURL(*externalURL, *httpListenAddr, httpserver.IsTLS())
if err != nil {
logger.Fatalf("failed to init `external.url`: %s", err)
}
notifier.InitTemplateFunc(eu)
alertURLGeneratorFn, err = getAlertURLGenerator(eu, *externalAlertSource, *validateTemplates)
if err != nil {
logger.Fatalf("failed to init `external.alert.source`: %s", err)
}
if *replayFrom != "" || *replayTo != "" {
rw, err := remotewrite.Init(context.Background())
if err != nil {
logger.Fatalf("failed to init remoteWrite: %s", err)
}
eu, err := getExternalURL(*externalURL, *httpListenAddr, httpserver.IsTLS())
if err != nil {
logger.Fatalf("failed to init `external.url`: %s", err)
}
notifier.InitTemplateFunc(eu)
groupsCfg, err := config.Parse(*rulePath, *validateTemplates, *validateExpressions)
if err != nil {
@ -118,11 +127,16 @@ func main() {
logger.Fatalf("cannot parse configuration file: %s", err)
}
// Register SIGHUP handler for config re-read just before manager.start call.
// This guarantees that the config will be re-read if the signal arrives during manager.start call.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1240
sighupCh := procutil.NewSighupChan()
if err := manager.start(ctx, groupsCfg); err != nil {
logger.Fatalf("failed to start: %s", err)
}
go configReload(ctx, manager, groupsCfg)
go configReload(ctx, manager, groupsCfg, sighupCh)
rh := &requestHandler{m: manager}
go httpserver.Serve(*httpListenAddr, rh.handler)
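The comment above describes a classic ordering trick: register the signal channel first, so a SIGHUP delivered during the slow start call is buffered rather than dropped. A minimal stdlib sketch of the pattern (vmalert itself uses its `procutil` wrapper):

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	// Register the channel before the (potentially slow) start step:
	// a SIGHUP arriving while start() runs is buffered instead of lost.
	sighupCh := make(chan os.Signal, 1)
	signal.Notify(sighupCh, syscall.SIGHUP)

	start := func() { time.Sleep(100 * time.Millisecond) } // stand-in for manager.start
	start()

	go func() {
		for range sighupCh {
			fmt.Println("reloading config")
		}
	}()
	time.Sleep(time.Second) // keep the process alive for the demo
}
```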
@ -148,20 +162,10 @@ func newManager(ctx context.Context) (*manager, error) {
if err != nil {
return nil, fmt.Errorf("failed to init datasource: %w", err)
}
eu, err := getExternalURL(*externalURL, *httpListenAddr, httpserver.IsTLS())
if err != nil {
return nil, fmt.Errorf("failed to init `external.url`: %w", err)
}
notifier.InitTemplateFunc(eu)
aug, err := getAlertURLGenerator(eu, *externalAlertSource, *validateTemplates)
if err != nil {
return nil, fmt.Errorf("failed to init `external.alert.source`: %w", err)
}
nts, err := notifier.Init(aug)
nts, err := notifier.Init(alertURLGeneratorFn)
if err != nil {
return nil, fmt.Errorf("failed to init notifier: %w", err)
}
manager := &manager{
groups: make(map[uint64]*Group),
querierBuilder: q,
@ -246,12 +250,7 @@ See the docs at https://docs.victoriametrics.com/vmalert.html .
flagutil.Usage(s)
}
func configReload(ctx context.Context, m *manager, groupsCfg []config.Group) {
// Register SIGHUP handler for config re-read just before manager.start call.
// This guarantees that the config will be re-read if the signal arrives during manager.start call.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1240
sighupCh := procutil.NewSighupChan()
func configReload(ctx context.Context, m *manager, groupsCfg []config.Group, sighupCh <-chan os.Signal) {
var configCheckCh <-chan time.Time
if *rulesCheckInterval > 0 {
ticker := time.NewTicker(*rulesCheckInterval)

View file

@ -102,8 +102,9 @@ groups:
}
syncCh := make(chan struct{})
sighupCh := procutil.NewSighupChan()
go func() {
configReload(ctx, m, nil)
configReload(ctx, m, nil, sighupCh)
close(syncCh)
}()

View file

@ -34,6 +34,8 @@ type Alert struct {
Value float64
// ID is the unique identifier for the Alert
ID uint64
// Restored is true if Alert was restored after restart
Restored bool
}
// AlertState type indicates the Alert state

View file

@ -12,7 +12,7 @@ import (
var (
addr = flag.String("remoteRead.url", "", "Optional URL to VictoriaMetrics or vmselect that will be used to restore alerts "+
"state. This configuration makes sense only if `vmalert` was configured with `remoteWrite.url` before and has been successfully persisted its state. "+
"E.g. http://127.0.0.1:8428")
"E.g. http://127.0.0.1:8428. See also -remoteRead.disablePathAppend")
basicAuthUsername = flag.String("remoteRead.basicAuth.username", "", "Optional basic auth username for -remoteRead.url")
basicAuthPassword = flag.String("remoteRead.basicAuth.password", "", "Optional basic auth password for -remoteRead.url")
basicAuthPasswordFile = flag.String("remoteRead.basicAuth.passwordFile", "", "Optional path to basic auth password to use for -remoteRead.url")
@ -26,6 +26,7 @@ var (
"By default system CA is used")
tlsServerName = flag.String("remoteRead.tlsServerName", "", "Optional TLS server name to use for connections to -remoteRead.url. "+
"By default the server name from -remoteRead.url is used")
disablePathAppend = flag.Bool("remoteRead.disablePathAppend", false, "Whether to disable automatic appending of '/api/v1/query' path to the configured -remoteRead.url.")
)
// Init creates a Querier from provided flag values.
@ -43,5 +44,5 @@ func Init() (datasource.QuerierBuilder, error) {
return nil, fmt.Errorf("failed to configure auth: %w", err)
}
c := &http.Client{Transport: tr}
return datasource.NewVMStorage(*addr, authCfg, 0, 0, false, c), nil
return datasource.NewVMStorage(*addr, authCfg, 0, 0, false, c, *disablePathAppend), nil
}

View file

@ -246,13 +246,13 @@ func (c *Client) send(ctx context.Context, data []byte) error {
resp, err := c.c.Do(req.WithContext(ctx))
if err != nil {
return fmt.Errorf("error while sending request to %s: %w; Data len %d(%d)",
req.URL, err, len(data), r.Size())
req.URL.Redacted(), err, len(data), r.Size())
}
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
body, _ := ioutil.ReadAll(resp.Body)
return fmt.Errorf("unexpected response code %d for %s. Response body %q",
resp.StatusCode, req.URL, body)
resp.StatusCode, req.URL.Redacted(), body)
}
return nil
}

View file

@ -28,13 +28,14 @@ func initLinks() {
{path.Join(pathPrefix, "api/v1/groups"), "list all loaded groups and rules"},
{path.Join(pathPrefix, "api/v1/alerts"), "list all active alerts"},
{path.Join(pathPrefix, "api/v1/groupID/alertID/status"), "get alert status by ID"},
{path.Join(pathPrefix, "flags"), "command-line flags"},
{path.Join(pathPrefix, "metrics"), "list of application metrics"},
{path.Join(pathPrefix, "-/reload"), "reload configuration"},
}
navItems = []tpl.NavItem{
{Name: "vmalert", Url: path.Join(pathPrefix, "/")},
{Name: "vmalert", Url: pathPrefix},
{Name: "Groups", Url: path.Join(pathPrefix, "groups")},
{Name: "Alerts", Url: path.Join(pathPrefix, "/alerts")},
{Name: "Alerts", Url: path.Join(pathPrefix, "alerts")},
{Name: "Docs", Url: "https://docs.victoriametrics.com/vmalert.html"},
}
}

View file

@ -51,7 +51,7 @@
<div class="group-heading{% if rNotOk[g.Name] > 0 %} alert-danger{% endif %}" data-bs-target="rules-{%s g.ID %}">
<span class="anchor" id="group-{%s g.ID %}"></span>
<a href="#group-{%s g.ID %}">{%s g.Name %}{% if g.Type != "prometheus" %} ({%s g.Type %}){% endif %} (every {%s g.Interval %})</a>
{% if rNotOk[g.Name] > 0 %}<span class="badge bg-danger" title="Number of rules withs status Error">{%d rNotOk[g.Name] %}</span> {% endif %}
{% if rNotOk[g.Name] > 0 %}<span class="badge bg-danger" title="Number of rules with status Error">{%d rNotOk[g.Name] %}</span> {% endif %}
<span class="badge bg-success" title="Number of rules withs status Ok">{%d rOk[g.Name] %}</span>
<p class="fs-6 fw-lighter">{%s g.File %}</p>
{% if len(g.ExtraFilterLabels) > 0 %}
@ -155,7 +155,9 @@
sort.Strings(labelKeys)
%}
<br>
<b>alert:</b> {%s defaultAR.Name %} ({%d len(alertsByRule[ruleID]) %})<br>
<b>alert:</b> {%s defaultAR.Name %} ({%d len(alertsByRule[ruleID]) %})
| <span><a target="_blank" href="{%s defaultAR.SourceLink %}">Source</a></span>
<br>
<b>expr:</b><code><pre>{%s defaultAR.Expression %}</pre></code>
<table class="table table-striped table-hover table-sm">
<thead>
@ -175,8 +177,11 @@
<span class="ms-1 badge bg-primary">{%s k %}={%s ar.Labels[k] %}</span>
{% endfor %}
</td>
<td><span class="badge {% if ar.State=="firing" %}bg-danger{% else %} bg-warning text-dark{% endif %}">{%s ar.State %}</span></td>
<td>{%s ar.ActiveAt.Format("2006-01-02T15:04:05Z07:00") %}</td>
<td>{%= badgeState(ar.State) %}</td>
<td>
{%s ar.ActiveAt.Format("2006-01-02T15:04:05Z07:00") %}
{% if ar.Restored %}{%= badgeRestored() %}{% endif %}
</td>
<td>{%s ar.Value %}</td>
<td>
<a href="/{%s g.ID %}/{%s ar.ID %}/status">Details</a>
@ -270,7 +275,31 @@
<a target="_blank" href="/groups#group-{%s alert.GroupID %}">{%s alert.GroupID %}</a>
</div>
</div>
</div>
<div class="container border-bottom p-2">
<div class="row">
<div class="col-2">
Source link
</div>
<div class="col">
<a target="_blank" href="{%s alert.SourceLink %}">Link</a>
</div>
</div>
</div>
{%= tpl.Footer() %}
{% endfunc %}
{% func badgeState(state string) %}
{%code
badgeClass := "bg-warning text-dark"
if state == "firing" {
badgeClass = "bg-danger"
}
%}
<span class="badge {%s badgeClass %}">{%s state %}</span>
{% endfunc %}
{% func badgeRestored() %}
<span class="badge bg-warning text-dark" title="Alert state was restored after the service restart from remote storage">restored</span>
{% endfunc %}

View file

@ -190,7 +190,7 @@ func StreamListGroups(qw422016 *qt422016.Writer, groups []APIGroup) {
//line app/vmalert/web.qtpl:54
if rNotOk[g.Name] > 0 {
//line app/vmalert/web.qtpl:54
qw422016.N().S(`<span class="badge bg-danger" title="Number of rules withs status Error">`)
qw422016.N().S(`<span class="badge bg-danger" title="Number of rules with status Error">`)
//line app/vmalert/web.qtpl:54
qw422016.N().D(rNotOk[g.Name])
//line app/vmalert/web.qtpl:54
@ -573,11 +573,17 @@ func StreamListAlerts(qw422016 *qt422016.Writer, groupAlerts []GroupAlerts) {
//line app/vmalert/web.qtpl:158
qw422016.N().D(len(alertsByRule[ruleID]))
//line app/vmalert/web.qtpl:158
qw422016.N().S(`)<br>
qw422016.N().S(`)
| <span><a target="_blank" href="`)
//line app/vmalert/web.qtpl:159
qw422016.E().S(defaultAR.SourceLink)
//line app/vmalert/web.qtpl:159
qw422016.N().S(`">Source</a></span>
<br>
<b>expr:</b><code><pre>`)
//line app/vmalert/web.qtpl:159
//line app/vmalert/web.qtpl:161
qw422016.E().S(defaultAR.Expression)
//line app/vmalert/web.qtpl:159
//line app/vmalert/web.qtpl:161
qw422016.N().S(`</pre></code>
<table class="table table-striped table-hover table-sm">
<thead>
@ -591,152 +597,151 @@ func StreamListAlerts(qw422016 *qt422016.Writer, groupAlerts []GroupAlerts) {
</thead>
<tbody>
`)
//line app/vmalert/web.qtpl:171
//line app/vmalert/web.qtpl:173
for _, ar := range alertsByRule[ruleID] {
//line app/vmalert/web.qtpl:171
//line app/vmalert/web.qtpl:173
qw422016.N().S(`
<tr>
<td>
`)
//line app/vmalert/web.qtpl:174
//line app/vmalert/web.qtpl:176
for _, k := range labelKeys {
//line app/vmalert/web.qtpl:174
//line app/vmalert/web.qtpl:176
qw422016.N().S(`
<span class="ms-1 badge bg-primary">`)
//line app/vmalert/web.qtpl:175
//line app/vmalert/web.qtpl:177
qw422016.E().S(k)
//line app/vmalert/web.qtpl:175
//line app/vmalert/web.qtpl:177
qw422016.N().S(`=`)
//line app/vmalert/web.qtpl:175
//line app/vmalert/web.qtpl:177
qw422016.E().S(ar.Labels[k])
//line app/vmalert/web.qtpl:175
//line app/vmalert/web.qtpl:177
qw422016.N().S(`</span>
`)
//line app/vmalert/web.qtpl:176
//line app/vmalert/web.qtpl:178
}
//line app/vmalert/web.qtpl:176
//line app/vmalert/web.qtpl:178
qw422016.N().S(`
</td>
<td><span class="badge `)
//line app/vmalert/web.qtpl:178
if ar.State == "firing" {
//line app/vmalert/web.qtpl:178
qw422016.N().S(`bg-danger`)
//line app/vmalert/web.qtpl:178
} else {
//line app/vmalert/web.qtpl:178
qw422016.N().S(` bg-warning text-dark`)
//line app/vmalert/web.qtpl:178
}
//line app/vmalert/web.qtpl:178
qw422016.N().S(`">`)
//line app/vmalert/web.qtpl:178
qw422016.E().S(ar.State)
//line app/vmalert/web.qtpl:178
qw422016.N().S(`</span></td>
<td>`)
//line app/vmalert/web.qtpl:179
qw422016.E().S(ar.ActiveAt.Format("2006-01-02T15:04:05Z07:00"))
//line app/vmalert/web.qtpl:179
qw422016.N().S(`</td>
<td>`)
//line app/vmalert/web.qtpl:180
qw422016.E().S(ar.Value)
streambadgeState(qw422016, ar.State)
//line app/vmalert/web.qtpl:180
qw422016.N().S(`</td>
<td>
`)
//line app/vmalert/web.qtpl:182
qw422016.E().S(ar.ActiveAt.Format("2006-01-02T15:04:05Z07:00"))
//line app/vmalert/web.qtpl:182
qw422016.N().S(`
`)
//line app/vmalert/web.qtpl:183
if ar.Restored {
//line app/vmalert/web.qtpl:183
streambadgeRestored(qw422016)
//line app/vmalert/web.qtpl:183
}
//line app/vmalert/web.qtpl:183
qw422016.N().S(`
</td>
<td>`)
//line app/vmalert/web.qtpl:185
qw422016.E().S(ar.Value)
//line app/vmalert/web.qtpl:185
qw422016.N().S(`</td>
<td>
<a href="/`)
//line app/vmalert/web.qtpl:182
//line app/vmalert/web.qtpl:187
qw422016.E().S(g.ID)
//line app/vmalert/web.qtpl:182
//line app/vmalert/web.qtpl:187
qw422016.N().S(`/`)
//line app/vmalert/web.qtpl:182
//line app/vmalert/web.qtpl:187
qw422016.E().S(ar.ID)
//line app/vmalert/web.qtpl:182
//line app/vmalert/web.qtpl:187
qw422016.N().S(`/status">Details</a>
</td>
</tr>
`)
//line app/vmalert/web.qtpl:185
//line app/vmalert/web.qtpl:190
}
//line app/vmalert/web.qtpl:185
//line app/vmalert/web.qtpl:190
qw422016.N().S(`
</tbody>
</table>
`)
//line app/vmalert/web.qtpl:188
//line app/vmalert/web.qtpl:193
}
//line app/vmalert/web.qtpl:188
//line app/vmalert/web.qtpl:193
qw422016.N().S(`
</div>
<br>
`)
//line app/vmalert/web.qtpl:191
//line app/vmalert/web.qtpl:196
}
//line app/vmalert/web.qtpl:191
//line app/vmalert/web.qtpl:196
qw422016.N().S(`
`)
//line app/vmalert/web.qtpl:193
//line app/vmalert/web.qtpl:198
} else {
//line app/vmalert/web.qtpl:193
//line app/vmalert/web.qtpl:198
qw422016.N().S(`
<div>
<p>No items...</p>
</div>
`)
//line app/vmalert/web.qtpl:197
//line app/vmalert/web.qtpl:202
}
//line app/vmalert/web.qtpl:197
//line app/vmalert/web.qtpl:202
qw422016.N().S(`
`)
//line app/vmalert/web.qtpl:199
//line app/vmalert/web.qtpl:204
tpl.StreamFooter(qw422016)
//line app/vmalert/web.qtpl:199
//line app/vmalert/web.qtpl:204
qw422016.N().S(`
`)
//line app/vmalert/web.qtpl:201
}
//line app/vmalert/web.qtpl:201
func WriteListAlerts(qq422016 qtio422016.Writer, groupAlerts []GroupAlerts) {
//line app/vmalert/web.qtpl:201
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmalert/web.qtpl:201
StreamListAlerts(qw422016, groupAlerts)
//line app/vmalert/web.qtpl:201
qt422016.ReleaseWriter(qw422016)
//line app/vmalert/web.qtpl:201
}
//line app/vmalert/web.qtpl:201
func ListAlerts(groupAlerts []GroupAlerts) string {
//line app/vmalert/web.qtpl:201
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmalert/web.qtpl:201
WriteListAlerts(qb422016, groupAlerts)
//line app/vmalert/web.qtpl:201
qs422016 := string(qb422016.B)
//line app/vmalert/web.qtpl:201
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmalert/web.qtpl:201
return qs422016
//line app/vmalert/web.qtpl:201
}
//line app/vmalert/web.qtpl:203
func StreamAlert(qw422016 *qt422016.Writer, alert *APIAlert) {
//line app/vmalert/web.qtpl:203
qw422016.N().S(`
`)
//line app/vmalert/web.qtpl:204
tpl.StreamHeader(qw422016, "", navItems)
//line app/vmalert/web.qtpl:204
qw422016.N().S(`
`)
//line app/vmalert/web.qtpl:206
}
//line app/vmalert/web.qtpl:206
func WriteListAlerts(qq422016 qtio422016.Writer, groupAlerts []GroupAlerts) {
//line app/vmalert/web.qtpl:206
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmalert/web.qtpl:206
StreamListAlerts(qw422016, groupAlerts)
//line app/vmalert/web.qtpl:206
qt422016.ReleaseWriter(qw422016)
//line app/vmalert/web.qtpl:206
}
//line app/vmalert/web.qtpl:206
func ListAlerts(groupAlerts []GroupAlerts) string {
//line app/vmalert/web.qtpl:206
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmalert/web.qtpl:206
WriteListAlerts(qb422016, groupAlerts)
//line app/vmalert/web.qtpl:206
qs422016 := string(qb422016.B)
//line app/vmalert/web.qtpl:206
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmalert/web.qtpl:206
return qs422016
//line app/vmalert/web.qtpl:206
}
//line app/vmalert/web.qtpl:208
func StreamAlert(qw422016 *qt422016.Writer, alert *APIAlert) {
//line app/vmalert/web.qtpl:208
qw422016.N().S(`
`)
//line app/vmalert/web.qtpl:209
tpl.StreamHeader(qw422016, "", navItems)
//line app/vmalert/web.qtpl:209
qw422016.N().S(`
`)
//line app/vmalert/web.qtpl:211
var labelKeys []string
for k := range alert.Labels {
labelKeys = append(labelKeys, k)
@ -749,28 +754,28 @@ func StreamAlert(qw422016 *qt422016.Writer, alert *APIAlert) {
}
sort.Strings(annotationKeys)
//line app/vmalert/web.qtpl:217
//line app/vmalert/web.qtpl:222
qw422016.N().S(`
<div class="display-6 pb-3 mb-3">`)
//line app/vmalert/web.qtpl:218
//line app/vmalert/web.qtpl:223
qw422016.E().S(alert.Name)
//line app/vmalert/web.qtpl:218
//line app/vmalert/web.qtpl:223
qw422016.N().S(`<span class="ms-2 badge `)
//line app/vmalert/web.qtpl:218
//line app/vmalert/web.qtpl:223
if alert.State == "firing" {
//line app/vmalert/web.qtpl:218
//line app/vmalert/web.qtpl:223
qw422016.N().S(`bg-danger`)
//line app/vmalert/web.qtpl:218
//line app/vmalert/web.qtpl:223
} else {
//line app/vmalert/web.qtpl:218
//line app/vmalert/web.qtpl:223
qw422016.N().S(` bg-warning text-dark`)
//line app/vmalert/web.qtpl:218
//line app/vmalert/web.qtpl:223
}
//line app/vmalert/web.qtpl:218
//line app/vmalert/web.qtpl:223
qw422016.N().S(`">`)
//line app/vmalert/web.qtpl:218
//line app/vmalert/web.qtpl:223
qw422016.E().S(alert.State)
//line app/vmalert/web.qtpl:218
//line app/vmalert/web.qtpl:223
qw422016.N().S(`</span></div>
<div class="container border-bottom p-2">
<div class="row">
@ -779,9 +784,9 @@ func StreamAlert(qw422016 *qt422016.Writer, alert *APIAlert) {
</div>
<div class="col">
`)
//line app/vmalert/web.qtpl:225
//line app/vmalert/web.qtpl:230
qw422016.E().S(alert.ActiveAt.Format("2006-01-02T15:04:05Z07:00"))
//line app/vmalert/web.qtpl:225
//line app/vmalert/web.qtpl:230
qw422016.N().S(`
</div>
</div>
@ -793,9 +798,9 @@ func StreamAlert(qw422016 *qt422016.Writer, alert *APIAlert) {
</div>
<div class="col">
<code><pre>`)
//line app/vmalert/web.qtpl:235
//line app/vmalert/web.qtpl:240
qw422016.E().S(alert.Expression)
//line app/vmalert/web.qtpl:235
//line app/vmalert/web.qtpl:240
qw422016.N().S(`</pre></code>
</div>
</div>
@ -807,23 +812,23 @@ func StreamAlert(qw422016 *qt422016.Writer, alert *APIAlert) {
</div>
<div class="col">
`)
//line app/vmalert/web.qtpl:245
//line app/vmalert/web.qtpl:250
for _, k := range labelKeys {
//line app/vmalert/web.qtpl:245
//line app/vmalert/web.qtpl:250
qw422016.N().S(`
<span class="m-1 badge bg-primary">`)
//line app/vmalert/web.qtpl:246
//line app/vmalert/web.qtpl:251
qw422016.E().S(k)
//line app/vmalert/web.qtpl:246
//line app/vmalert/web.qtpl:251
qw422016.N().S(`=`)
//line app/vmalert/web.qtpl:246
//line app/vmalert/web.qtpl:251
qw422016.E().S(alert.Labels[k])
//line app/vmalert/web.qtpl:246
//line app/vmalert/web.qtpl:251
qw422016.N().S(`</span>
`)
//line app/vmalert/web.qtpl:247
//line app/vmalert/web.qtpl:252
}
//line app/vmalert/web.qtpl:247
//line app/vmalert/web.qtpl:252
qw422016.N().S(`
</div>
</div>
@ -835,24 +840,24 @@ func StreamAlert(qw422016 *qt422016.Writer, alert *APIAlert) {
</div>
<div class="col">
`)
//line app/vmalert/web.qtpl:257
//line app/vmalert/web.qtpl:262
for _, k := range annotationKeys {
//line app/vmalert/web.qtpl:257
//line app/vmalert/web.qtpl:262
qw422016.N().S(`
<b>`)
//line app/vmalert/web.qtpl:258
//line app/vmalert/web.qtpl:263
qw422016.E().S(k)
//line app/vmalert/web.qtpl:258
//line app/vmalert/web.qtpl:263
qw422016.N().S(`:</b><br>
<p>`)
//line app/vmalert/web.qtpl:259
//line app/vmalert/web.qtpl:264
qw422016.E().S(alert.Annotations[k])
//line app/vmalert/web.qtpl:259
//line app/vmalert/web.qtpl:264
qw422016.N().S(`</p>
`)
//line app/vmalert/web.qtpl:260
//line app/vmalert/web.qtpl:265
}
//line app/vmalert/web.qtpl:260
//line app/vmalert/web.qtpl:265
qw422016.N().S(`
</div>
</div>
@ -864,49 +869,150 @@ func StreamAlert(qw422016 *qt422016.Writer, alert *APIAlert) {
</div>
<div class="col">
<a target="_blank" href="/groups#group-`)
//line app/vmalert/web.qtpl:270
//line app/vmalert/web.qtpl:275
qw422016.E().S(alert.GroupID)
//line app/vmalert/web.qtpl:270
//line app/vmalert/web.qtpl:275
qw422016.N().S(`">`)
//line app/vmalert/web.qtpl:270
//line app/vmalert/web.qtpl:275
qw422016.E().S(alert.GroupID)
//line app/vmalert/web.qtpl:270
//line app/vmalert/web.qtpl:275
qw422016.N().S(`</a>
</div>
</div>
</div>
<div class="container border-bottom p-2">
<div class="row">
<div class="col-2">
Source link
</div>
<div class="col">
<a target="_blank" href="`)
//line app/vmalert/web.qtpl:285
qw422016.E().S(alert.SourceLink)
//line app/vmalert/web.qtpl:285
qw422016.N().S(`">Link</a>
</div>
</div>
</div>
`)
//line app/vmalert/web.qtpl:274
//line app/vmalert/web.qtpl:289
tpl.StreamFooter(qw422016)
//line app/vmalert/web.qtpl:274
//line app/vmalert/web.qtpl:289
qw422016.N().S(`
`)
//line app/vmalert/web.qtpl:276
//line app/vmalert/web.qtpl:291
}
//line app/vmalert/web.qtpl:276
//line app/vmalert/web.qtpl:291
func WriteAlert(qq422016 qtio422016.Writer, alert *APIAlert) {
//line app/vmalert/web.qtpl:276
//line app/vmalert/web.qtpl:291
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmalert/web.qtpl:276
//line app/vmalert/web.qtpl:291
StreamAlert(qw422016, alert)
//line app/vmalert/web.qtpl:276
//line app/vmalert/web.qtpl:291
qt422016.ReleaseWriter(qw422016)
//line app/vmalert/web.qtpl:276
//line app/vmalert/web.qtpl:291
}
//line app/vmalert/web.qtpl:276
//line app/vmalert/web.qtpl:291
func Alert(alert *APIAlert) string {
//line app/vmalert/web.qtpl:276
//line app/vmalert/web.qtpl:291
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmalert/web.qtpl:276
//line app/vmalert/web.qtpl:291
WriteAlert(qb422016, alert)
//line app/vmalert/web.qtpl:276
//line app/vmalert/web.qtpl:291
qs422016 := string(qb422016.B)
//line app/vmalert/web.qtpl:276
//line app/vmalert/web.qtpl:291
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmalert/web.qtpl:276
//line app/vmalert/web.qtpl:291
return qs422016
//line app/vmalert/web.qtpl:276
//line app/vmalert/web.qtpl:291
}
//line app/vmalert/web.qtpl:293
func streambadgeState(qw422016 *qt422016.Writer, state string) {
//line app/vmalert/web.qtpl:293
qw422016.N().S(`
`)
//line app/vmalert/web.qtpl:295
badgeClass := "bg-warning text-dark"
if state == "firing" {
badgeClass = "bg-danger"
}
//line app/vmalert/web.qtpl:299
qw422016.N().S(`
<span class="badge `)
//line app/vmalert/web.qtpl:300
qw422016.E().S(badgeClass)
//line app/vmalert/web.qtpl:300
qw422016.N().S(`">`)
//line app/vmalert/web.qtpl:300
qw422016.E().S(state)
//line app/vmalert/web.qtpl:300
qw422016.N().S(`</span>
`)
//line app/vmalert/web.qtpl:301
}
//line app/vmalert/web.qtpl:301
func writebadgeState(qq422016 qtio422016.Writer, state string) {
//line app/vmalert/web.qtpl:301
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmalert/web.qtpl:301
streambadgeState(qw422016, state)
//line app/vmalert/web.qtpl:301
qt422016.ReleaseWriter(qw422016)
//line app/vmalert/web.qtpl:301
}
//line app/vmalert/web.qtpl:301
func badgeState(state string) string {
//line app/vmalert/web.qtpl:301
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmalert/web.qtpl:301
writebadgeState(qb422016, state)
//line app/vmalert/web.qtpl:301
qs422016 := string(qb422016.B)
//line app/vmalert/web.qtpl:301
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmalert/web.qtpl:301
return qs422016
//line app/vmalert/web.qtpl:301
}
//line app/vmalert/web.qtpl:303
func streambadgeRestored(qw422016 *qt422016.Writer) {
//line app/vmalert/web.qtpl:303
qw422016.N().S(`
<span class="badge bg-warning text-dark" title="Alert state was restored after reload from remote storage">restored</span>
`)
//line app/vmalert/web.qtpl:305
}
//line app/vmalert/web.qtpl:305
func writebadgeRestored(qq422016 qtio422016.Writer) {
//line app/vmalert/web.qtpl:305
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmalert/web.qtpl:305
streambadgeRestored(qw422016)
//line app/vmalert/web.qtpl:305
qt422016.ReleaseWriter(qw422016)
//line app/vmalert/web.qtpl:305
}
//line app/vmalert/web.qtpl:305
func badgeRestored() string {
//line app/vmalert/web.qtpl:305
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmalert/web.qtpl:305
writebadgeRestored(qb422016)
//line app/vmalert/web.qtpl:305
qs422016 := string(qb422016.B)
//line app/vmalert/web.qtpl:305
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmalert/web.qtpl:305
return qs422016
//line app/vmalert/web.qtpl:305
}

View file

@ -17,6 +17,8 @@ type APIAlert struct {
Labels map[string]string `json:"labels"`
Annotations map[string]string `json:"annotations"`
ActiveAt time.Time `json:"activeAt"`
SourceLink string `json:"source"`
Restored bool `json:"restored"`
}
// APIGroup represents Group for WEB view

View file

@ -37,9 +37,8 @@ Each `url_prefix` in the [-auth.config](#auth-config) may contain either a singl
`-auth.config` is represented in the following simple `yml` format:
```yml
# Arbitrary number of usernames may be put here.
# Usernames must be unique.
# Username and bearer_token values must be unique.
users:
# Requests with the 'Authorization: Bearer XXXX' header are proxied to http://localhost:8428 .
@ -47,6 +46,14 @@ users:
- bearer_token: "XXXX"
url_prefix: "http://localhost:8428"
# Requests with the 'Authorization: Bearer YYY' header are proxied to http://localhost:8428 ,
# The `X-Scope-OrgID: foobar` http header is added to every proxied request.
# For example, http://vmauth:8427/api/v1/query is proxied to http://localhost:8428/api/v1/query
- bearer_token: "YYY"
url_prefix: "http://localhost:8428"
headers:
- "X-Scope-OrgID: foobar"
# The user for querying local single-node VictoriaMetrics.
# All the requests to http://vmauth:8427 with the given Basic Auth (username:password)
# will be proxied to http://localhost:8428 .
@ -89,7 +96,6 @@ users:
- "http://vminsert1:8480/insert/42/prometheus"
- "http://vminsert2:8480/insert/42/prometheus"
# A single user for querying and inserting data:
# - Requests to http://vmauth:8427/api/v1/query, http://vmauth:8427/api/v1/query_range
# and http://vmauth:8427/api/v1/label/<label_name>/values are proxied to the following urls in a round-robin manner:
@ -97,7 +103,8 @@ users:
# - http://vmselect2:8481/select/42/prometheus
# For example, http://vmauth:8427/api/v1/query is proxied to http://vmselect1:8480/select/42/prometheus/api/v1/query
# or to http://vmselect2:8480/select/42/prometheus/api/v1/query .
# - Requests to http://vmauth:8427/api/v1/write are proxied to http://vminsert:8480/insert/42/prometheus/api/v1/write
# - Requests to http://vmauth:8427/api/v1/write are proxied to http://vminsert:8480/insert/42/prometheus/api/v1/write .
# The "X-Scope-OrgID: abc" http header is added to these requests.
- username: "foobar"
url_map:
- src_paths:
@ -109,7 +116,8 @@ users:
- "http://vmselect2:8481/select/42/prometheus"
- src_paths: ["/api/v1/write"]
url_prefix: "http://vminsert:8480/insert/42/prometheus"
    headers:
      - "X-Scope-OrgID: abc"
```
The config may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.
This may be useful for passing secrets to the config.
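
The `%{ENV_VAR}` substitution above can be illustrated with a minimal Go sketch. This is not vmauth's actual implementation; the `expandEnv` helper and the regexp are assumptions for illustration:

```go
package main

import (
	"fmt"
	"os"
	"regexp"
)

// placeholderRe matches %{ENV_VAR} placeholders in the config text.
var placeholderRe = regexp.MustCompile(`%\{([^}]+)\}`)

// expandEnv replaces every %{ENV_VAR} placeholder with the value of the
// corresponding environment variable.
func expandEnv(config string) string {
	return placeholderRe.ReplaceAllStringFunc(config, func(m string) string {
		return os.Getenv(m[2 : len(m)-1]) // strip the leading "%{" and the trailing "}"
	})
}

func main() {
	os.Setenv("BEARER", "XXXX")
	fmt.Println(expandEnv(`bearer_token: "%{BEARER}"`))
	// Output: bearer_token: "XXXX"
}
```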

View file

@ -27,24 +27,53 @@ var (
// AuthConfig represents auth config.
type AuthConfig struct {
Users []UserInfo `yaml:"users"`
Users []UserInfo `yaml:"users,omitempty"`
}
// UserInfo is user information read from authConfigPath
type UserInfo struct {
BearerToken string `yaml:"bearer_token"`
Username string `yaml:"username"`
Password string `yaml:"password"`
URLPrefix *URLPrefix `yaml:"url_prefix"`
URLMap []URLMap `yaml:"url_map"`
BearerToken string `yaml:"bearer_token,omitempty"`
Username string `yaml:"username,omitempty"`
Password string `yaml:"password,omitempty"`
URLPrefix *URLPrefix `yaml:"url_prefix,omitempty"`
URLMap []URLMap `yaml:"url_map,omitempty"`
Headers []Header `yaml:"headers,omitempty"`
requests *metrics.Counter
}
// Header is `Name: Value` http header, which must be added to the proxied request.
type Header struct {
Name string
Value string
}
// UnmarshalYAML unmarshals h from f.
func (h *Header) UnmarshalYAML(f func(interface{}) error) error {
var s string
if err := f(&s); err != nil {
return err
}
n := strings.IndexByte(s, ':')
if n < 0 {
return fmt.Errorf("missing speparator char ':' between Name and Value in the header %q; expected format - 'Name: Value'", s)
}
h.Name = strings.TrimSpace(s[:n])
h.Value = strings.TrimSpace(s[n+1:])
return nil
}
// MarshalYAML marshals h to yaml.
func (h *Header) MarshalYAML() (interface{}, error) {
s := fmt.Sprintf("%s: %s", h.Name, h.Value)
return s, nil
}
// URLMap is a mapping from source paths to target urls.
type URLMap struct {
SrcPaths []*SrcPath `yaml:"src_paths"`
URLPrefix *URLPrefix `yaml:"url_prefix"`
SrcPaths []*SrcPath `yaml:"src_paths,omitempty"`
URLPrefix *URLPrefix `yaml:"url_prefix,omitempty"`
Headers []Header `yaml:"headers,omitempty"`
}
// SrcPath represents an src path

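
The `Name: Value` parsing implemented by `Header.UnmarshalYAML` above boils down to a split on the first `:` plus whitespace trimming. A standalone sketch of the same logic (the `parseHeader` helper is hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

// parseHeader mirrors the Header.UnmarshalYAML logic: split the
// "Name: Value" string on the first ':' and trim whitespace on both sides.
func parseHeader(s string) (name, value string, err error) {
	n := strings.IndexByte(s, ':')
	if n < 0 {
		return "", "", fmt.Errorf("missing separator char ':' in the header %q; expected format - 'Name: Value'", s)
	}
	return strings.TrimSpace(s[:n]), strings.TrimSpace(s[n+1:]), nil
}

func main() {
	name, value, err := parseHeader("X-Scope-OrgID: foobar")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s=%q\n", name, value) // X-Scope-OrgID="foobar"
}
```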
View file

@ -69,6 +69,14 @@ users:
- [foo]
`)
// Invalid headers
f(`
users:
- username: foo
url_prefix: http://foo.bar
headers: foobar
`)
// empty url_prefix
f(`
users:
@ -156,6 +164,27 @@ users:
- src_paths: ['fo[obar']
url_prefix: http://foobar
`)
// Invalid headers in url_map (missing ':')
f(`
users:
- username: a
url_map:
- src_paths: ['/foobar']
url_prefix: http://foobar
headers:
- foobar
`)
// Invalid headers in url_map (dictionary instead of array)
f(`
users:
- username: a
url_map:
- src_paths: ['/foobar']
url_prefix: http://foobar
headers:
aaa: bbb
`)
}
func TestParseAuthConfigSuccess(t *testing.T) {
@ -231,6 +260,9 @@ users:
url_prefix: http://vmselect/select/0/prometheus
- src_paths: ["/api/v1/write"]
url_prefix: ["http://vminsert1/insert/0/prometheus","http://vminsert2/insert/0/prometheus"]
headers:
- "foo: bar"
- "xxx: y"
`, map[string]*UserInfo{
getAuthToken("foo", "", ""): {
BearerToken: "foo",
@ -245,6 +277,16 @@ users:
"http://vminsert1/insert/0/prometheus",
"http://vminsert2/insert/0/prometheus",
}),
Headers: []Header{
{
Name: "foo",
Value: "bar",
},
{
Name: "xxx",
Value: "y",
},
},
},
},
},

View file

@ -1,5 +1,5 @@
# Arbitrary number of usernames may be put here.
# Usernames must be unique.
# Username and bearer_token values must be unique.
users:
# Requests with the 'Authorization: Bearer XXXX' header are proxied to http://localhost:8428 .
@ -7,6 +7,14 @@ users:
- bearer_token: "XXXX"
url_prefix: "http://localhost:8428"
# Requests with the 'Authorization: Bearer YYY' header are proxied to http://localhost:8428 ,
# The `X-Scope-OrgID: foobar` http header is added to every proxied request.
# For example, http://vmauth:8427/api/v1/query is proxied to http://localhost:8428/api/v1/query
- bearer_token: "YYY"
url_prefix: "http://localhost:8428"
headers:
- "X-Scope-OrgID: foobar"
# The user for querying local single-node VictoriaMetrics.
# All the requests to http://vmauth:8427 with the given Basic Auth (username:password)
# will be proxied to http://localhost:8428 .
@ -49,7 +57,6 @@ users:
- "http://vminsert1:8480/insert/42/prometheus"
- "http://vminsert2:8480/insert/42/prometheus"
# A single user for querying and inserting data:
# - Requests to http://vmauth:8427/api/v1/query, http://vmauth:8427/api/v1/query_range
# and http://vmauth:8427/api/v1/label/<label_name>/values are proxied to the following urls in a round-robin manner:
@ -57,7 +64,8 @@ users:
# - http://vmselect2:8481/select/42/prometheus
# For example, http://vmauth:8427/api/v1/query is proxied to http://vmselect1:8480/select/42/prometheus/api/v1/query
# or to http://vmselect2:8480/select/42/prometheus/api/v1/query .
# - Requests to http://vmauth:8427/api/v1/write are proxied to http://vminsert:8480/insert/42/prometheus/api/v1/write
# - Requests to http://vmauth:8427/api/v1/write are proxied to http://vminsert:8480/insert/42/prometheus/api/v1/write .
# The "X-Scope-OrgID: abc" http header is added to these requests.
- username: "foobar"
url_map:
- src_paths:
@ -69,3 +77,5 @@ users:
- "http://vmselect2:8481/select/42/prometheus"
- src_paths: ["/api/v1/write"]
url_prefix: "http://vminsert:8480/insert/42/prometheus"
headers:
- "X-Scope-OrgID: abc"

View file

@ -23,7 +23,7 @@ var (
maxIdleConnsPerBackend = flag.Int("maxIdleConnsPerBackend", 100, "The maximum number of idle connections vmauth can open per each backend host")
reloadAuthKey = flag.String("reloadAuthKey", "", "Auth key for /-/reload http endpoint. It must be passed as authKey=...")
logInvalidAuthTokens = flag.Bool("logInvalidAuthTokens", false, "Whether to log requests with invalid auth tokens. "+
`Such requests are always counted at vmagent_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page`)
`Such requests are always counted at vmauth_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page`)
)
func main() {
@ -84,12 +84,15 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
return true
}
ui.requests.Inc()
targetURL, err := createTargetURL(ui, r.URL)
targetURL, headers, err := createTargetURL(ui, r.URL)
if err != nil {
httpserver.Errorf(w, r, "cannot determine targetURL: %s", err)
return true
}
r.Header.Set("vm-target-url", targetURL.String())
for _, h := range headers {
r.Header.Set(h.Name, h.Value)
}
proxyRequest(w, r)
return true
}
@ -109,9 +112,9 @@ func proxyRequest(w http.ResponseWriter, r *http.Request) {
}
var (
configReloadRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/-/reload"}`)
invalidAuthTokenRequests = metrics.NewCounter(`vmagent_http_request_errors_total{reason="invalid_auth_token"}`)
missingRouteRequests = metrics.NewCounter(`vmagent_http_request_errors_total{reason="missing_route"}`)
configReloadRequests = metrics.NewCounter(`vmauth_http_requests_total{path="/-/reload"}`)
invalidAuthTokenRequests = metrics.NewCounter(`vmauth_http_request_errors_total{reason="invalid_auth_token"}`)
missingRouteRequests = metrics.NewCounter(`vmauth_http_request_errors_total{reason="missing_route"}`)
)
var reverseProxy = &httputil.ReverseProxy{
@ -130,6 +133,9 @@ var reverseProxy = &httputil.ReverseProxy{
// Disable HTTP/2.0, since VictoriaMetrics components don't support HTTP/2.0 (because there is no sense in this).
tr.ForceAttemptHTTP2 = false
tr.MaxIdleConnsPerHost = *maxIdleConnsPerBackend
if tr.MaxIdleConns != 0 && tr.MaxIdleConns < tr.MaxIdleConnsPerHost {
tr.MaxIdleConns = tr.MaxIdleConnsPerHost
}
return tr
}(),
FlushInterval: time.Second,

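
The header-injection pattern used by `requestHandler` above can be reproduced with the standard library's reverse proxy. A minimal sketch, assuming a hypothetical backend on `http://localhost:8428` and a hard-coded header in place of the per-user config:

```go
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	// Hypothetical backend; in vmauth this comes from the matched url_prefix.
	backend, err := url.Parse("http://localhost:8428")
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(backend)
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Inject the configured headers before proxying, as requestHandler does.
		r.Header.Set("X-Scope-OrgID", "foobar")
		proxy.ServeHTTP(w, r)
	})
	log.Fatal(http.ListenAndServe(":8427", nil))
}
```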
View file

@ -35,7 +35,7 @@ func mergeURLs(uiURL, requestURI *url.URL) *url.URL {
return &targetURL
}
func createTargetURL(ui *UserInfo, uOrig *url.URL) (*url.URL, error) {
func createTargetURL(ui *UserInfo, uOrig *url.URL) (*url.URL, []Header, error) {
u := *uOrig
// Prevent attacks that use `..` in r.URL.Path
u.Path = path.Clean(u.Path)
@ -46,13 +46,13 @@ func createTargetURL(ui *UserInfo, uOrig *url.URL) (*url.URL, error) {
for _, e := range ui.URLMap {
for _, sp := range e.SrcPaths {
if sp.match(u.Path) {
return e.URLPrefix.mergeURLs(&u), nil
return e.URLPrefix.mergeURLs(&u), e.Headers, nil
}
}
}
if ui.URLPrefix != nil {
return ui.URLPrefix.mergeURLs(&u), nil
return ui.URLPrefix.mergeURLs(&u), ui.Headers, nil
}
missingRouteRequests.Inc()
return nil, fmt.Errorf("missing route for %q", u.String())
return nil, nil, fmt.Errorf("missing route for %q", u.String())
}

View file

@ -1,47 +1,56 @@
package main
import (
"fmt"
"net/url"
"testing"
)
func TestCreateTargetURLSuccess(t *testing.T) {
f := func(ui *UserInfo, requestURI, expectedTarget string) {
f := func(ui *UserInfo, requestURI, expectedTarget, expectedHeaders string) {
t.Helper()
u, err := url.Parse(requestURI)
if err != nil {
t.Fatalf("cannot parse %q: %s", requestURI, err)
}
target, err := createTargetURL(ui, u)
target, headers, err := createTargetURL(ui, u)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if target.String() != expectedTarget {
t.Fatalf("unexpected target; got %q; want %q", target, expectedTarget)
}
headersStr := fmt.Sprintf("%q", headers)
if headersStr != expectedHeaders {
t.Fatalf("unexpected headers; got %s; want %s", headersStr, expectedHeaders)
}
}
// Simple routing with `url_prefix`
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar"),
}, "", "http://foo.bar/.")
}, "", "http://foo.bar/.", "[]")
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar"),
}, "/", "http://foo.bar")
Headers: []Header{{
Name: "bb",
Value: "aaa",
}},
}, "/", "http://foo.bar", `[{"bb" "aaa"}]`)
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar/federate"),
}, "/", "http://foo.bar/federate")
}, "/", "http://foo.bar/federate", "[]")
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar"),
}, "a/b?c=d", "http://foo.bar/a/b?c=d")
}, "a/b?c=d", "http://foo.bar/a/b?c=d", "[]")
f(&UserInfo{
URLPrefix: mustParseURL("https://sss:3894/x/y"),
}, "/z", "https://sss:3894/x/y/z")
}, "/z", "https://sss:3894/x/y/z", "[]")
f(&UserInfo{
URLPrefix: mustParseURL("https://sss:3894/x/y"),
}, "/../../aaa", "https://sss:3894/x/y/aaa")
}, "/../../aaa", "https://sss:3894/x/y/aaa", "[]")
f(&UserInfo{
URLPrefix: mustParseURL("https://sss:3894/x/y"),
}, "/./asd/../../aaa?a=d&s=s/../d", "https://sss:3894/x/y/aaa?a=d&s=s%2F..%2Fd")
}, "/./asd/../../aaa?a=d&s=s/../d", "https://sss:3894/x/y/aaa?a=d&s=s%2F..%2Fd", "[]")
// Complex routing with `url_map`
ui := &UserInfo{
@ -49,6 +58,16 @@ func TestCreateTargetURLSuccess(t *testing.T) {
{
SrcPaths: getSrcPaths([]string{"/api/v1/query"}),
URLPrefix: mustParseURL("http://vmselect/0/prometheus"),
Headers: []Header{
{
Name: "xx",
Value: "aa",
},
{
Name: "yy",
Value: "asdf",
},
},
},
{
SrcPaths: getSrcPaths([]string{"/api/v1/write"}),
@ -56,10 +75,14 @@ func TestCreateTargetURLSuccess(t *testing.T) {
},
},
URLPrefix: mustParseURL("http://default-server"),
Headers: []Header{{
Name: "bb",
Value: "aaa",
}},
}
f(ui, "/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up")
f(ui, "/api/v1/write", "http://vminsert/0/prometheus/api/v1/write")
f(ui, "/api/v1/query_range", "http://default-server/api/v1/query_range")
f(ui, "/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up", `[{"xx" "aa"} {"yy" "asdf"}]`)
f(ui, "/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "[]")
f(ui, "/api/v1/query_range", "http://default-server/api/v1/query_range", `[{"bb" "aaa"}]`)
// Complex routing regexp paths in `url_map`
ui = &UserInfo{
@ -75,17 +98,17 @@ func TestCreateTargetURLSuccess(t *testing.T) {
},
URLPrefix: mustParseURL("http://default-server"),
}
f(ui, "/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up")
f(ui, "/api/v1/query_range?query=up", "http://vmselect/0/prometheus/api/v1/query_range?query=up")
f(ui, "/api/v1/label/foo/values", "http://vmselect/0/prometheus/api/v1/label/foo/values")
f(ui, "/api/v1/write", "http://vminsert/0/prometheus/api/v1/write")
f(ui, "/api/v1/foo/bar", "http://default-server/api/v1/foo/bar")
f(ui, "/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up", "[]")
f(ui, "/api/v1/query_range?query=up", "http://vmselect/0/prometheus/api/v1/query_range?query=up", "[]")
f(ui, "/api/v1/label/foo/values", "http://vmselect/0/prometheus/api/v1/label/foo/values", "[]")
f(ui, "/api/v1/write", "http://vminsert/0/prometheus/api/v1/write", "[]")
f(ui, "/api/v1/foo/bar", "http://default-server/api/v1/foo/bar", "[]")
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar?extra_label=team=dev"),
}, "/api/v1/query", "http://foo.bar/api/v1/query?extra_label=team=dev")
}, "/api/v1/query", "http://foo.bar/api/v1/query?extra_label=team=dev", "[]")
f(&UserInfo{
URLPrefix: mustParseURL("http://foo.bar?extra_label=team=mobile"),
}, "/api/v1/query?extra_label=team=dev", "http://foo.bar/api/v1/query?extra_label=team%3Dmobile")
}, "/api/v1/query?extra_label=team=dev", "http://foo.bar/api/v1/query?extra_label=team%3Dmobile", "[]")
}
@ -96,13 +119,16 @@ func TestCreateTargetURLFailure(t *testing.T) {
if err != nil {
t.Fatalf("cannot parse %q: %s", requestURI, err)
}
target, err := createTargetURL(ui, u)
target, headers, err := createTargetURL(ui, u)
if err == nil {
t.Fatalf("expecting non-nil error")
}
if target != nil {
t.Fatalf("unexpected target=%q; want empty string", target)
}
if headers != nil {
t.Fatalf("unexpected headers=%q; want empty string", headers)
}
}
f(&UserInfo{}, "/foo/bar")
f(&UserInfo{

View file

@ -58,6 +58,11 @@ func toFloat64(v interface{}) (float64, error) {
return float64(i), nil
case string:
return strconv.ParseFloat(i, 64)
case bool:
if i {
return 1, nil
}
return 0, nil
default:
return 0, fmt.Errorf("unexpected value type %v", i)
}

View file

@ -1,6 +1,7 @@
package influx
import (
"encoding/json"
"reflect"
"testing"
)
@ -58,3 +59,28 @@ func TestSeries_Unmarshal(t *testing.T) {
}
}
}
func TestToFloat64(t *testing.T) {
f := func(in interface{}, want float64) {
t.Helper()
got, err := toFloat64(in)
if err != nil {
t.Fatalf("unexpected err: %s", err)
}
if got != want {
t.Errorf("got %v; want %v", got, want)
}
}
f("123.4", 123.4)
f(float64(123.4), 123.4)
f(float32(12), 12)
f(123, 123)
f(true, 1)
f(false, 0)
f(json.Number("123456.789"), 123456.789)
_, err := toFloat64("text")
if err == nil {
t.Fatalf("expected to get err; got nil instead")
}
}

View file

@ -196,6 +196,11 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
state := r.FormValue("state")
promscrape.WriteAPIV1Targets(w, state)
return true
case "/prometheus/config", "/config":
promscrapeConfigRequests.Inc()
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
promscrape.WriteConfigData(w)
return true
case "/prometheus/-/reload", "/-/reload":
promscrapeConfigReloadRequests.Inc()
procutil.SelfSIGHUP()
@ -250,6 +255,8 @@ var (
promscrapeTargetsRequests = metrics.NewCounter(`vm_http_requests_total{path="/targets"}`)
promscrapeAPIV1TargetsRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/targets"}`)
promscrapeConfigRequests = metrics.NewCounter(`vm_http_requests_total{path="/config"}`)
promscrapeConfigReloadRequests = metrics.NewCounter(`vm_http_requests_total{path="/-/reload"}`)
_ = metrics.NewGauge(`vm_metrics_with_dropped_labels_total`, func() float64 {

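
The new `/config` endpoint can be queried like any other HTTP handler. A minimal sketch, assuming an instance listening on vmagent's default `:8429` address:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Fetch the currently loaded scrape config; adjust the address as needed.
	resp, err := http.Get("http://localhost:8429/config")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", data)
}
```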
View file

@ -11,6 +11,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/metrics"
"github.com/VictoriaMetrics/metricsql"
xxhash "github.com/cespare/xxhash/v2"
)
var aggrFuncs = map[string]aggrFunc{
@ -1010,6 +1011,28 @@ func aggrFuncLimitK(afa *aggrFuncArg) ([]*timeseries, error) {
}
}
afe := func(tss []*timeseries, modifier *metricsql.ModifierExpr) []*timeseries {
// Sort series by metricName in order to get consistent set of output series
// across multiple calls to limitk() function.
// Sort series by hash in order to guarantee uniform selection across series.
type hashSeries struct {
h uint64
ts *timeseries
}
hss := make([]hashSeries, len(tss))
d := xxhash.New()
for i, ts := range tss {
h := getHash(d, &ts.MetricName)
hss[i] = hashSeries{
h: h,
ts: ts,
}
}
sort.Slice(hss, func(i, j int) bool {
return hss[i].h < hss[j].h
})
for i, hs := range hss {
tss[i] = hs.ts
}
if len(tss) > maxK {
tss = tss[:maxK]
}
@ -1027,6 +1050,17 @@ func aggrFuncLimitK(afa *aggrFuncArg) ([]*timeseries, error) {
return aggrFuncExt(afe, args[1], &afa.ae.Modifier, afa.ae.Limit, true)
}
func getHash(d *xxhash.Digest, mn *storage.MetricName) uint64 {
d.Reset()
_, _ = d.Write(mn.MetricGroup)
for _, tag := range mn.Tags {
_, _ = d.Write(tag.Key)
_, _ = d.Write(tag.Value)
}
return d.Sum64()
}
func aggrFuncQuantiles(afa *aggrFuncArg) ([]*timeseries, error) {
args := afa.args
if len(args) < 3 {

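
The hash-sort in `limitk()` above makes the selected subset both uniform across series and stable across repeated evaluations. A standalone sketch of the same idea (the `limitK` helper and series names are hypothetical):

```go
package main

import (
	"fmt"
	"sort"

	xxhash "github.com/cespare/xxhash/v2"
)

// limitK picks at most k items deterministically: items are ordered by
// their xxhash, so repeated calls select the same subset, while the hash
// spreads the selection uniformly across items.
func limitK(names []string, k int) []string {
	type hashed struct {
		h    uint64
		name string
	}
	hs := make([]hashed, len(names))
	for i, name := range names {
		hs[i] = hashed{h: xxhash.Sum64String(name), name: name}
	}
	sort.Slice(hs, func(i, j int) bool { return hs[i].h < hs[j].h })
	if k > len(hs) {
		k = len(hs)
	}
	out := make([]string, 0, k)
	for _, hn := range hs[:k] {
		out = append(out, hn.name)
	}
	return out
}

func main() {
	series := []string{`up{job="a"}`, `up{job="b"}`, `up{job="c"}`}
	fmt.Println(limitK(series, 2)) // the same two series on every run
}
```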
View file

@ -19,6 +19,9 @@ var binaryOpFuncs = map[string]binaryOpFunc{
"%": newBinaryOpArithFunc(binaryop.Mod),
"^": newBinaryOpArithFunc(binaryop.Pow),
// See https://github.com/prometheus/prometheus/pull/9248
"atan2": newBinaryOpArithFunc(binaryop.Atan2),
// cmp ops
"==": newBinaryOpCmpFunc(binaryop.Eq),
"!=": newBinaryOpCmpFunc(binaryop.Neq),

View file

@ -1039,6 +1039,17 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run("sinh()", func(t *testing.T) {
t.Parallel()
q := `sinh(pi()*(2000-time())/1000)`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{11.548739357257748, 6.132140673514712, 3.217113080357038, 1.6144880404748523, 0.6704839982471175, 0},
Timestamps: timestampsExpected,
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run("asin()", func(t *testing.T) {
t.Parallel()
q := `asin((2000-time())/1000)`
@ -1050,6 +1061,50 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run("asinh(sinh)", func(t *testing.T) {
t.Parallel()
q := `asinh(sinh((2000-time())/1000))`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{1, 0.8000000000000002, 0.6, 0.4000000000000001, 0.2, 0},
Timestamps: timestampsExpected,
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run("atan2()", func(t *testing.T) {
t.Parallel()
q := `time() atan2 time()/10`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{0.07853981633974483, 0.07853981633974483, 0.07853981633974483, 0.07853981633974483, 0.07853981633974483, 0.07853981633974483},
Timestamps: timestampsExpected,
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run("atan()", func(t *testing.T) {
t.Parallel()
q := `atan((2000-time())/1000)`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{0.7853981633974483, 0.6747409422235526, 0.5404195002705842, 0.3805063771123649, 0.19739555984988078, 0},
Timestamps: timestampsExpected,
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run("atanh(tanh)", func(t *testing.T) {
t.Parallel()
q := `atanh(tanh((2000-time())/1000))`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{1, 0.8000000000000002, 0.6, 0.4000000000000001, 0.2, 0},
Timestamps: timestampsExpected,
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run("cos()", func(t *testing.T) {
t.Parallel()
q := `cos(pi()*(2000-time())/1000)`
@ -1072,6 +1127,28 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run("acosh(cosh)", func(t *testing.T) {
t.Parallel()
q := `acosh(cosh((2000-time())/1000))`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{1, 0.8000000000000002, 0.5999999999999999, 0.40000000000000036, 0.20000000000000023, 0},
Timestamps: timestampsExpected,
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run("rad(deg)", func(t *testing.T) {
t.Parallel()
q := `rad(deg(time()/500))`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{2, 2.3999999999999995, 2.8, 3.2, 3.6, 4},
Timestamps: timestampsExpected,
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run("floor(time()/500)", func(t *testing.T) {
t.Parallel()
q := `floor(time()/500)`
@ -4993,7 +5070,7 @@ func TestExecSuccess(t *testing.T) {
})
t.Run(`limitk(1)`, func(t *testing.T) {
t.Parallel()
q := `limitk(1, label_set(10, "foo", "bar") or label_set(time()/150, "baz", "sss"))`
q := `limitk(1, label_set(10, "foo", "bar") or label_set(time()/150, "xbaz", "sss"))`
r1 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{10, 10, 10, 10, 10, 10},
@ -7241,9 +7318,13 @@ func TestExecError(t *testing.T) {
f(`smooth_exponential(1)`)
f(`remove_resets()`)
f(`sin()`)
f(`sinh()`)
f(`cos()`)
f(`cosh()`)
f(`asin()`)
f(`asinh()`)
f(`acos()`)
f(`acosh()`)
f(`rand(123, 456)`)
f(`rand_normal(123, 456)`)
f(`rand_exponential(122, 456)`)

View file

@ -1384,7 +1384,7 @@ func rollupIncreasePure(rfa *rollupFuncArg) float64 {
prevValue = 0
}
if len(values) == 0 {
// Assume the counter didsn't change since prevValue.
// Assume the counter didn't change since prevValue.
return 0
}
return values[len(values)-1] - prevValue

View file

@ -43,13 +43,22 @@ var transformFuncs = map[string]transformFunc{
// See funcs accepting instant-vector on https://prometheus.io/docs/prometheus/latest/querying/functions/ .
"abs": newTransformFuncOneArg(transformAbs),
"absent": transformAbsent,
"acos": newTransformFuncOneArg(transformAcos),
"acosh": newTransformFuncOneArg(transformAcosh),
"asin": newTransformFuncOneArg(transformAsin),
"asinh": newTransformFuncOneArg(transformAsinh),
"atan": newTransformFuncOneArg(transformAtan),
"atanh": newTransformFuncOneArg(transformAtanh),
"ceil": newTransformFuncOneArg(transformCeil),
"clamp": transformClamp,
"clamp_max": transformClampMax,
"clamp_min": transformClampMin,
"cos": newTransformFuncOneArg(transformCos),
"cosh": newTransformFuncOneArg(transformCosh),
"day_of_month": newTransformFuncDateTime(transformDayOfMonth),
"day_of_week": newTransformFuncDateTime(transformDayOfWeek),
"days_in_month": newTransformFuncDateTime(transformDaysInMonth),
"deg": newTransformFuncOneArg(transformDeg),
"exp": newTransformFuncOneArg(transformExp),
"floor": newTransformFuncOneArg(transformFloor),
"histogram_quantile": transformHistogramQuantile,
@ -61,12 +70,18 @@ var transformFuncs = map[string]transformFunc{
"log10": newTransformFuncOneArg(transformLog10),
"minute": newTransformFuncDateTime(transformMinute),
"month": newTransformFuncDateTime(transformMonth),
"pi": transformPi,
"rad": newTransformFuncOneArg(transformRad),
"round": transformRound,
"sgn": transformSign,
"scalar": transformScalar,
"sgn": transformSgn,
"sin": newTransformFuncOneArg(transformSin),
"sinh": newTransformFuncOneArg(transformSinh),
"sort": newTransformFuncSort(false),
"sort_desc": newTransformFuncSort(true),
"sqrt": newTransformFuncOneArg(transformSqrt),
"tan": newTransformFuncOneArg(transformTan),
"tanh": newTransformFuncOneArg(transformTanh),
"time": transformTime,
// "timestamp" has been moved to rollup funcs. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/415
"vector": transformVector,
@ -109,11 +124,6 @@ var transformFuncs = map[string]transformFunc{
"rand": newTransformRand(newRandFloat64),
"rand_normal": newTransformRand(newRandNormFloat64),
"rand_exponential": newTransformRand(newRandExpFloat64),
"pi": transformPi,
"sin": newTransformFuncOneArg(transformSin),
"cos": newTransformFuncOneArg(transformCos),
"asin": newTransformFuncOneArg(transformAsin),
"acos": newTransformFuncOneArg(transformAcos),
"prometheus_buckets": transformPrometheusBuckets,
"buckets_limit": transformBucketsLimit,
"histogram_share": transformHistogramShare,
@ -1777,7 +1787,7 @@ func transformRound(tfa *transformFuncArg) ([]*timeseries, error) {
return doTransformValues(args[0], tf, tfa.fe)
}
func transformSign(tfa *transformFuncArg) ([]*timeseries, error) {
func transformSgn(tfa *transformFuncArg) ([]*timeseries, error) {
args := tfa.args
if err := expectTransformArgsNum(args, 1); err != nil {
return nil, err
@ -1898,18 +1908,58 @@ func transformSin(v float64) float64 {
return math.Sin(v)
}
func transformSinh(v float64) float64 {
return math.Sinh(v)
}
func transformCos(v float64) float64 {
return math.Cos(v)
}
func transformCosh(v float64) float64 {
return math.Cosh(v)
}
func transformTan(v float64) float64 {
return math.Tan(v)
}
func transformTanh(v float64) float64 {
return math.Tanh(v)
}
func transformAsin(v float64) float64 {
return math.Asin(v)
}
func transformAsinh(v float64) float64 {
return math.Asinh(v)
}
func transformAtan(v float64) float64 {
return math.Atan(v)
}
func transformAtanh(v float64) float64 {
return math.Atanh(v)
}
func transformAcos(v float64) float64 {
return math.Acos(v)
}
func transformAcosh(v float64) float64 {
return math.Acosh(v)
}
func transformDeg(v float64) float64 {
return v * 180 / math.Pi
}
func transformRad(v float64) float64 {
return v * math.Pi / 180
}
func newTransformRand(newRandFunc func(r *rand.Rand) func() float64) transformFunc {
return func(tfa *transformFuncArg) ([]*timeseries, error) {
args := tfa.args

View file

@ -1,19 +1,19 @@
{
"files": {
"main.css": "./static/css/main.cbb91dd8.chunk.css",
"main.js": "./static/js/main.3fb3f4d1.chunk.js",
"runtime-main.js": "./static/js/runtime-main.22ec3f63.js",
"main.css": "./static/css/main.acc63211.chunk.css",
"main.js": "./static/js/main.fe86f8ba.chunk.js",
"runtime-main.js": "./static/js/runtime-main.b765a534.js",
"static/css/2.a684aa27.chunk.css": "./static/css/2.a684aa27.chunk.css",
"static/js/2.72d7cb01.chunk.js": "./static/js/2.72d7cb01.chunk.js",
"static/js/3.3cf0cbc4.chunk.js": "./static/js/3.3cf0cbc4.chunk.js",
"static/js/2.632b68e4.chunk.js": "./static/js/2.632b68e4.chunk.js",
"static/js/3.daeccd9c.chunk.js": "./static/js/3.daeccd9c.chunk.js",
"index.html": "./index.html",
"static/js/2.72d7cb01.chunk.js.LICENSE.txt": "./static/js/2.72d7cb01.chunk.js.LICENSE.txt"
"static/js/2.632b68e4.chunk.js.LICENSE.txt": "./static/js/2.632b68e4.chunk.js.LICENSE.txt"
},
"entrypoints": [
"static/js/runtime-main.22ec3f63.js",
"static/js/runtime-main.b765a534.js",
"static/css/2.a684aa27.chunk.css",
"static/js/2.72d7cb01.chunk.js",
"static/css/main.cbb91dd8.chunk.css",
"static/js/main.3fb3f4d1.chunk.js"
"static/js/2.632b68e4.chunk.js",
"static/css/main.acc63211.chunk.css",
"static/js/main.fe86f8ba.chunk.js"
]
}

View file

@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><link href="./static/css/2.a684aa27.chunk.css" rel="stylesheet"><link href="./static/css/main.cbb91dd8.chunk.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div><script>!function(e){function r(r){for(var n,i,a=r[0],c=r[1],l=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(r);p.length;)p.shift()();return u.push.apply(u,l||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"3cf0cbc4"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(l);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var l=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"==typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var l=0;l<a.length;l++)r(a[l]);var f=c;t()}([])</script><script src="./static/js/2.72d7cb01.chunk.js"></script><script src="./static/js/main.3fb3f4d1.chunk.js"></script></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><link href="./static/css/2.a684aa27.chunk.css" rel="stylesheet"><link href="./static/css/main.acc63211.chunk.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div><script>!function(e){function r(r){for(var n,i,a=r[0],c=r[1],l=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(r);p.length;)p.shift()();return u.push.apply(u,l||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"daeccd9c"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(l);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var l=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"==typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var l=0;l<a.length;l++)r(a[l]);var f=c;t()}([])</script><script src="./static/js/2.632b68e4.chunk.js"></script><script src="./static/js/main.fe86f8ba.chunk.js"></script></body></html>

View file

@ -1 +1 @@
body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI","Roboto","Oxygen","Ubuntu","Cantarell","Fira Sans","Droid Sans","Helvetica Neue",sans-serif;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}code{font-family:source-code-pro,Menlo,Monaco,Consolas,"Courier New",monospace}.MuiAccordionSummary-content{margin:10px 0!important}.cm-activeLine{background-color:inherit!important}.cm-wrap{border-radius:4px;border:1px solid #b9b9b9;font-size:10px}.one-line-scroll .cm-wrap{height:24px}.cm-content,.cm-gutter{min-height:51px}.one-line-scroll .cm-content,.one-line-scroll .cm-gutter{min-height:auto}.uplot .u-legend{display:grid;align-items:center;justify-content:start;text-align:left;margin-top:25px}.uplot .u-legend .u-series{font-size:12px}
body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI","Roboto","Oxygen","Ubuntu","Cantarell","Fira Sans","Droid Sans","Helvetica Neue",sans-serif;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}code{font-family:source-code-pro,Menlo,Monaco,Consolas,"Courier New",monospace}.MuiAccordionSummary-content{margin:10px 0!important}.cm-activeLine{background-color:inherit!important}.cm-wrap{border-radius:4px;border:1px solid #b9b9b9;font-size:10px}.one-line-scroll .cm-wrap{height:24px}.cm-content,.cm-gutter{min-height:51px}.one-line-scroll .cm-content,.one-line-scroll .cm-gutter{min-height:auto}.uplot .u-legend{display:grid;align-items:center;justify-content:start;text-align:left;margin-top:25px}.uplot .u-legend .u-series{font-size:12px}.u-tooltip{position:absolute;display:none;grid-gap:12px;max-width:300px;padding:8px;border-radius:4px;background:rgba(57,57,57,.9);color:#fff;font-size:10px;line-height:1.4em;font-weight:500;word-wrap:break-word;font-family:monospace;pointer-events:none;z-index:100}.u-tooltip-data{display:flex;flex-wrap:wrap;align-items:center;font-size:11px}.u-tooltip__info{display:grid;grid-gap:4px}.u-tooltip__marker{width:12px;height:12px;margin-right:4px}

File diff suppressed because one or more lines are too long

View file

@ -4,13 +4,6 @@ object-assign
@license MIT
*/
/*!
* chartjs-adapter-date-fns v2.0.0
* https://www.chartjs.org
* (c) 2021 chartjs-adapter-date-fns Contributors
* Released under the MIT license
*/
/*! *****************************************************************************
Copyright (c) Microsoft Corporation.
@ -34,12 +27,6 @@ PERFORMANCE OF THIS SOFTWARE.
* http://adamwdraper.github.com/Numeral-js/
*/
/*! Hammer.JS - v2.0.7 - 2016-04-22
* http://hammerjs.github.io/
*
* Copyright (c) 2016 Jorik Tangelder;
* Licensed under the MIT license */
/*! exports provided: default */
/*! exports provided: optionsUpdateState, dataMatch */

File diff suppressed because one or more lines are too long

View file

@ -1 +1 @@
(this.webpackJsonpvmui=this.webpackJsonpvmui||[]).push([[3],{271:function(t,n,e){"use strict";e.r(n),e.d(n,"getCLS",(function(){return l})),e.d(n,"getFCP",(function(){return g})),e.d(n,"getFID",(function(){return h})),e.d(n,"getLCP",(function(){return y})),e.d(n,"getTTFB",(function(){return F}));var i,a,r=function(){return"".concat(Date.now(),"-").concat(Math.floor(8999999999999*Math.random())+1e12)},o=function(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:-1;return{name:t,value:n,delta:0,entries:[],id:r(),isFinal:!1}},u=function(t,n){try{if(PerformanceObserver.supportedEntryTypes.includes(t)){var e=new PerformanceObserver((function(t){return t.getEntries().map(n)}));return e.observe({type:t,buffered:!0}),e}}catch(t){}},s=!1,c=!1,d=function(t){s=!t.persisted},f=function(){addEventListener("pagehide",d),addEventListener("beforeunload",(function(){}))},p=function(t){var n=arguments.length>1&&void 0!==arguments[1]&&arguments[1];c||(f(),c=!0),addEventListener("visibilitychange",(function(n){var e=n.timeStamp;"hidden"===document.visibilityState&&t({timeStamp:e,isUnloading:s})}),{capture:!0,once:n})},v=function(t,n,e,i){var a;return function(){e&&n.isFinal&&e.disconnect(),n.value>=0&&(i||n.isFinal||"hidden"===document.visibilityState)&&(n.delta=n.value-(a||0),(n.delta||n.isFinal||void 0===a)&&(t(n),a=n.value))}},l=function(t){var n,e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],i=o("CLS",0),a=function(t){t.hadRecentInput||(i.value+=t.value,i.entries.push(t),n())},r=u("layout-shift",a);r&&(n=v(t,i,r,e),p((function(t){var e=t.isUnloading;r.takeRecords().map(a),e&&(i.isFinal=!0),n()})))},m=function(){return void 0===i&&(i="hidden"===document.visibilityState?0:1/0,p((function(t){var n=t.timeStamp;return i=n}),!0)),{get timeStamp(){return i}}},g=function(t){var n,e=o("FCP"),i=m(),a=u("paint",(function(t){"first-contentful-paint"===t.name&&t.startTime<i.timeStamp&&(e.value=t.startTime,e.isFinal=!0,e.entries.push(t),n())}));a&&(n=v(t,e,a))},h=function(t){var n=o("FID"),e=m(),i=function(t){t.startTime<e.timeStamp&&(n.value=t.processingStart-t.startTime,n.entries.push(t),n.isFinal=!0,r())},a=u("first-input",i),r=v(t,n,a);a?p((function(){a.takeRecords().map(i),a.disconnect()}),!0):window.perfMetrics&&window.perfMetrics.onFirstInputDelay&&window.perfMetrics.onFirstInputDelay((function(t,i){i.timeStamp<e.timeStamp&&(n.value=t,n.isFinal=!0,n.entries=[{entryType:"first-input",name:i.type,target:i.target,cancelable:i.cancelable,startTime:i.timeStamp,processingStart:i.timeStamp+t}],r())}))},S=function(){return a||(a=new Promise((function(t){return["scroll","keydown","pointerdown"].map((function(n){addEventListener(n,t,{once:!0,passive:!0,capture:!0})}))}))),a},y=function(t){var n,e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],i=o("LCP"),a=m(),r=function(t){var e=t.startTime;e<a.timeStamp?(i.value=e,i.entries.push(t)):i.isFinal=!0,n()},s=u("largest-contentful-paint",r);if(s){n=v(t,i,s,e);var c=function(){i.isFinal||(s.takeRecords().map(r),i.isFinal=!0,n())};S().then(c),p(c,!0)}},F=function(t){var n,e=o("TTFB");n=function(){try{var n=performance.getEntriesByType("navigation")[0]||function(){var t=performance.timing,n={entryType:"navigation",startTime:0};for(var e in t)"navigationStart"!==e&&"toJSON"!==e&&(n[e]=Math.max(t[e]-t.navigationStart,0));return n}();e.value=e.delta=n.responseStart,e.entries=[n],e.isFinal=!0,t(e)}catch(t){}},"complete"===document.readyState?setTimeout(n,0):addEventListener("pageshow",n)}}}]);
(this.webpackJsonpvmui=this.webpackJsonpvmui||[]).push([[3],{266:function(t,n,e){"use strict";e.r(n),e.d(n,"getCLS",(function(){return l})),e.d(n,"getFCP",(function(){return g})),e.d(n,"getFID",(function(){return h})),e.d(n,"getLCP",(function(){return y})),e.d(n,"getTTFB",(function(){return F}));var i,a,r=function(){return"".concat(Date.now(),"-").concat(Math.floor(8999999999999*Math.random())+1e12)},o=function(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:-1;return{name:t,value:n,delta:0,entries:[],id:r(),isFinal:!1}},u=function(t,n){try{if(PerformanceObserver.supportedEntryTypes.includes(t)){var e=new PerformanceObserver((function(t){return t.getEntries().map(n)}));return e.observe({type:t,buffered:!0}),e}}catch(t){}},s=!1,c=!1,d=function(t){s=!t.persisted},f=function(){addEventListener("pagehide",d),addEventListener("beforeunload",(function(){}))},p=function(t){var n=arguments.length>1&&void 0!==arguments[1]&&arguments[1];c||(f(),c=!0),addEventListener("visibilitychange",(function(n){var e=n.timeStamp;"hidden"===document.visibilityState&&t({timeStamp:e,isUnloading:s})}),{capture:!0,once:n})},v=function(t,n,e,i){var a;return function(){e&&n.isFinal&&e.disconnect(),n.value>=0&&(i||n.isFinal||"hidden"===document.visibilityState)&&(n.delta=n.value-(a||0),(n.delta||n.isFinal||void 0===a)&&(t(n),a=n.value))}},l=function(t){var n,e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],i=o("CLS",0),a=function(t){t.hadRecentInput||(i.value+=t.value,i.entries.push(t),n())},r=u("layout-shift",a);r&&(n=v(t,i,r,e),p((function(t){var e=t.isUnloading;r.takeRecords().map(a),e&&(i.isFinal=!0),n()})))},m=function(){return void 0===i&&(i="hidden"===document.visibilityState?0:1/0,p((function(t){var n=t.timeStamp;return i=n}),!0)),{get timeStamp(){return i}}},g=function(t){var n,e=o("FCP"),i=m(),a=u("paint",(function(t){"first-contentful-paint"===t.name&&t.startTime<i.timeStamp&&(e.value=t.startTime,e.isFinal=!0,e.entries.push(t),n())}));a&&(n=v(t,e,a))},h=function(t){var n=o("FID"),e=m(),i=function(t){t.startTime<e.timeStamp&&(n.value=t.processingStart-t.startTime,n.entries.push(t),n.isFinal=!0,r())},a=u("first-input",i),r=v(t,n,a);a?p((function(){a.takeRecords().map(i),a.disconnect()}),!0):window.perfMetrics&&window.perfMetrics.onFirstInputDelay&&window.perfMetrics.onFirstInputDelay((function(t,i){i.timeStamp<e.timeStamp&&(n.value=t,n.isFinal=!0,n.entries=[{entryType:"first-input",name:i.type,target:i.target,cancelable:i.cancelable,startTime:i.timeStamp,processingStart:i.timeStamp+t}],r())}))},S=function(){return a||(a=new Promise((function(t){return["scroll","keydown","pointerdown"].map((function(n){addEventListener(n,t,{once:!0,passive:!0,capture:!0})}))}))),a},y=function(t){var n,e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],i=o("LCP"),a=m(),r=function(t){var e=t.startTime;e<a.timeStamp?(i.value=e,i.entries.push(t)):i.isFinal=!0,n()},s=u("largest-contentful-paint",r);if(s){n=v(t,i,s,e);var c=function(){i.isFinal||(s.takeRecords().map(r),i.isFinal=!0,n())};S().then(c),p(c,!0)}},F=function(t){var n,e=o("TTFB");n=function(){try{var n=performance.getEntriesByType("navigation")[0]||function(){var t=performance.timing,n={entryType:"navigation",startTime:0};for(var e in t)"navigationStart"!==e&&"toJSON"!==e&&(n[e]=Math.max(t[e]-t.navigationStart,0));return n}();e.value=e.delta=n.responseStart,e.entries=[n],e.isFinal=!0,t(e)}catch(t){}},"complete"===document.readyState?setTimeout(n,0):addEventListener("pageshow",n)}}}]);

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -1 +1 @@
!function(e){function r(r){for(var n,i,a=r[0],c=r[1],l=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(r);p.length;)p.shift()();return u.push.apply(u,l||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"3cf0cbc4"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(l);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var l=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!==typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"===typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var l=0;l<a.length;l++)r(a[l]);var f=c;t()}([]);
!function(e){function r(r){for(var n,i,a=r[0],c=r[1],l=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(r);p.length;)p.shift()();return u.push.apply(u,l||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"daeccd9c"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(l);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var l=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!==typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"===typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var l=0;l<a.length;l++)r(a[l]);var f=c;t()}([]);


@@ -17,7 +17,6 @@
"@testing-library/jest-dom": "^5.11.6",
"@testing-library/react": "^11.1.2",
"@testing-library/user-event": "^12.2.2",
"@types/chart.js": "^2.9.34",
"@types/jest": "^26.0.15",
"@types/lodash.debounce": "^4.0.6",
"@types/lodash.get": "^4.4.6",
@@ -27,9 +26,6 @@
"@types/react": "^16.9.56",
"@types/react-dom": "^16.9.9",
"@types/react-measure": "^2.0.6",
"chart.js": "^3.5.1",
"chartjs-adapter-date-fns": "^2.0.0",
"chartjs-plugin-zoom": "^1.1.1",
"codemirror-promql": "^0.10.2",
"date-fns": "^2.23.0",
"dayjs": "^1.10.4",
@@ -38,7 +34,6 @@
"numeral": "^2.0.6",
"qs": "^6.5.2",
"react": "^17.0.1",
"react-chartjs-2": "^3.0.5",
"react-dom": "^17.0.1",
"react-measure": "^2.5.2",
"react-scripts": "4.0.0",
@@ -2869,14 +2864,6 @@
"@babel/types": "^7.3.0"
}
},
"node_modules/@types/chart.js": {
"version": "2.9.34",
"resolved": "https://registry.npmjs.org/@types/chart.js/-/chart.js-2.9.34.tgz",
"integrity": "sha512-CtZVk+kh1IN67dv+fB0CWmCLCRrDJgqOj15qPic2B1VCMovNO6B7Vhf/TgPpNscjhAL1j+qUntDMWb9A4ZmPTg==",
"dependencies": {
"moment": "^2.10.2"
}
},
"node_modules/@types/eslint": {
"version": "7.2.5",
"resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-7.2.5.tgz",
@@ -5069,30 +5056,6 @@
"node": ">=10"
}
},
"node_modules/chart.js": {
"version": "3.5.1",
"resolved": "https://registry.npmjs.org/chart.js/-/chart.js-3.5.1.tgz",
"integrity": "sha512-m5kzt72I1WQ9LILwQC4syla/LD/N413RYv2Dx2nnTkRS9iv/ey1xLTt0DnPc/eWV4zI+BgEgDYBIzbQhZHc/PQ=="
},
"node_modules/chartjs-adapter-date-fns": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/chartjs-adapter-date-fns/-/chartjs-adapter-date-fns-2.0.0.tgz",
"integrity": "sha512-rmZINGLe+9IiiEB0kb57vH3UugAtYw33anRiw5kS2Tu87agpetDDoouquycWc9pRsKtQo5j+vLsYHyr8etAvFw==",
"peerDependencies": {
"chart.js": "^3.0.0"
}
},
"node_modules/chartjs-plugin-zoom": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/chartjs-plugin-zoom/-/chartjs-plugin-zoom-1.1.1.tgz",
"integrity": "sha512-1q54WOzK7FtAjkbemQeqvmFUV0btNYIQny2HbQ6Awq9wUtCz7Zmj6vIgp3C1DYMQwN0nqgpC3vnApqiwI7cSdQ==",
"dependencies": {
"hammerjs": "^2.0.8"
},
"peerDependencies": {
"chart.js": "^3.2.0"
}
},
"node_modules/check-types": {
"version": "11.1.2",
"resolved": "https://registry.npmjs.org/check-types/-/check-types-11.1.2.tgz",
@@ -8956,14 +8919,6 @@
"node": ">=6"
}
},
"node_modules/hammerjs": {
"version": "2.0.8",
"resolved": "https://registry.npmjs.org/hammerjs/-/hammerjs-2.0.8.tgz",
"integrity": "sha1-BO93hiz/K7edMPdpIJWTAiK/YPE=",
"engines": {
"node": ">=0.8.0"
}
},
"node_modules/handle-thing": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz",
@@ -12496,14 +12451,6 @@
"mkdirp": "bin/cmd.js"
}
},
"node_modules/moment": {
"version": "2.29.1",
"resolved": "https://registry.npmjs.org/moment/-/moment-2.29.1.tgz",
"integrity": "sha512-kHmoybcPV8Sqy59DwNDY3Jefr64lK/by/da0ViFcuA4DH0vQg5Q6Ze5VimxkfQNSC+Mls/Kx53s7TjP1RhFEDQ==",
"engines": {
"node": "*"
}
},
"node_modules/move-concurrently": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/move-concurrently/-/move-concurrently-1.0.1.tgz",
@@ -15218,18 +15165,6 @@
"semver": "bin/semver"
}
},
"node_modules/react-chartjs-2": {
"version": "3.0.5",
"resolved": "https://registry.npmjs.org/react-chartjs-2/-/react-chartjs-2-3.0.5.tgz",
"integrity": "sha512-fYr4E82agaZi9IFMe5GtOZ6WE/HWdxy/KywLNOzXsqgPkD2oo1IlrQLKMLUki/2UXko3p95TR2L8Q2rEss/opQ==",
"dependencies": {
"lodash": "^4.17.19"
},
"peerDependencies": {
"chart.js": "^3.5.0",
"react": "^16.8.0 || ^17.0.0"
}
},
"node_modules/react-dev-utils": {
"version": "11.0.4",
"resolved": "https://registry.npmjs.org/react-dev-utils/-/react-dev-utils-11.0.4.tgz",
@@ -22907,14 +22842,6 @@
"@babel/types": "^7.3.0"
}
},
"@types/chart.js": {
"version": "2.9.34",
"resolved": "https://registry.npmjs.org/@types/chart.js/-/chart.js-2.9.34.tgz",
"integrity": "sha512-CtZVk+kh1IN67dv+fB0CWmCLCRrDJgqOj15qPic2B1VCMovNO6B7Vhf/TgPpNscjhAL1j+qUntDMWb9A4ZmPTg==",
"requires": {
"moment": "^2.10.2"
}
},
"@types/eslint": {
"version": "7.2.5",
"resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-7.2.5.tgz",
@@ -24703,25 +24630,6 @@
"resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz",
"integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw=="
},
"chart.js": {
"version": "3.5.1",
"resolved": "https://registry.npmjs.org/chart.js/-/chart.js-3.5.1.tgz",
"integrity": "sha512-m5kzt72I1WQ9LILwQC4syla/LD/N413RYv2Dx2nnTkRS9iv/ey1xLTt0DnPc/eWV4zI+BgEgDYBIzbQhZHc/PQ=="
},
"chartjs-adapter-date-fns": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/chartjs-adapter-date-fns/-/chartjs-adapter-date-fns-2.0.0.tgz",
"integrity": "sha512-rmZINGLe+9IiiEB0kb57vH3UugAtYw33anRiw5kS2Tu87agpetDDoouquycWc9pRsKtQo5j+vLsYHyr8etAvFw==",
"requires": {}
},
"chartjs-plugin-zoom": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/chartjs-plugin-zoom/-/chartjs-plugin-zoom-1.1.1.tgz",
"integrity": "sha512-1q54WOzK7FtAjkbemQeqvmFUV0btNYIQny2HbQ6Awq9wUtCz7Zmj6vIgp3C1DYMQwN0nqgpC3vnApqiwI7cSdQ==",
"requires": {
"hammerjs": "^2.0.8"
}
},
"check-types": {
"version": "11.1.2",
"resolved": "https://registry.npmjs.org/check-types/-/check-types-11.1.2.tgz",
@@ -27796,11 +27704,6 @@
"pify": "^4.0.1"
}
},
"hammerjs": {
"version": "2.0.8",
"resolved": "https://registry.npmjs.org/hammerjs/-/hammerjs-2.0.8.tgz",
"integrity": "sha1-BO93hiz/K7edMPdpIJWTAiK/YPE="
},
"handle-thing": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz",
@@ -30561,11 +30464,6 @@
"minimist": "^1.2.5"
}
},
"moment": {
"version": "2.29.1",
"resolved": "https://registry.npmjs.org/moment/-/moment-2.29.1.tgz",
"integrity": "sha512-kHmoybcPV8Sqy59DwNDY3Jefr64lK/by/da0ViFcuA4DH0vQg5Q6Ze5VimxkfQNSC+Mls/Kx53s7TjP1RhFEDQ=="
},
"move-concurrently": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/move-concurrently/-/move-concurrently-1.0.1.tgz",
@@ -32763,14 +32661,6 @@
}
}
},
"react-chartjs-2": {
"version": "3.0.5",
"resolved": "https://registry.npmjs.org/react-chartjs-2/-/react-chartjs-2-3.0.5.tgz",
"integrity": "sha512-fYr4E82agaZi9IFMe5GtOZ6WE/HWdxy/KywLNOzXsqgPkD2oo1IlrQLKMLUki/2UXko3p95TR2L8Q2rEss/opQ==",
"requires": {
"lodash": "^4.17.19"
}
},
"react-dev-utils": {
"version": "11.0.4",
"resolved": "https://registry.npmjs.org/react-dev-utils/-/react-dev-utils-11.0.4.tgz",


@@ -13,7 +13,6 @@
"@testing-library/jest-dom": "^5.11.6",
"@testing-library/react": "^11.1.2",
"@testing-library/user-event": "^12.2.2",
"@types/chart.js": "^2.9.34",
"@types/jest": "^26.0.15",
"@types/lodash.debounce": "^4.0.6",
"@types/lodash.get": "^4.4.6",
@@ -23,9 +22,6 @@
"@types/react": "^16.9.56",
"@types/react-dom": "^16.9.9",
"@types/react-measure": "^2.0.6",
"chart.js": "^3.5.1",
"chartjs-adapter-date-fns": "^2.0.0",
"chartjs-plugin-zoom": "^1.1.1",
"codemirror-promql": "^0.10.2",
"date-fns": "^2.23.0",
"dayjs": "^1.10.4",
@@ -34,7 +30,6 @@
"numeral": "^2.0.6",
"qs": "^6.5.2",
"react": "^17.0.1",
"react-chartjs-2": "^3.0.5",
"react-dom": "^17.0.1",
"react-measure": "^2.5.2",
"react-scripts": "4.0.0",


@@ -3,6 +3,7 @@ import {SnackbarProvider} from "./contexts/Snackbar";
import HomeLayout from "./components/Home/HomeLayout";
import {StateProvider} from "./state/common/StateContext";
import {AuthStateProvider} from "./state/auth/AuthStateContext";
import {GraphStateProvider} from "./state/graph/GraphStateContext";
import {createMuiTheme, MuiThemeProvider} from "@material-ui/core";
import CssBaseline from "@material-ui/core/CssBaseline";
@@ -26,9 +27,11 @@
<MuiThemeProvider theme={THEME}> {/* Material UI theme customization */}
<StateProvider> {/* Serialized into query string, common app settings */}
<AuthStateProvider> {/* Auth related info - optionally persisted to Local Storage */}
<SnackbarProvider> {/* Display various snackbars */}
<HomeLayout/>
</SnackbarProvider>
<GraphStateProvider> {/* Graph settings */}
<SnackbarProvider> {/* Display various snackbars */}
<HomeLayout/>
</SnackbarProvider>
</GraphStateProvider>
</AuthStateProvider>
</StateProvider>
</MuiThemeProvider>


@@ -1,7 +1,7 @@
import {TimeParams} from "../types";
export const getQueryRangeUrl = (server: string, query: string, period: TimeParams): string =>
`${server}/api/v1/query_range?query=${encodeURIComponent(query)}&start=${period.start}&end=${period.end}&step=${period.step}`;
export const getQueryRangeUrl = (server: string, query: string, period: TimeParams, nocache: boolean): string =>
`${server}/api/v1/query_range?query=${encodeURIComponent(query)}&start=${period.start}&end=${period.end}&step=${period.step}${nocache ? "&nocache=1" : ""}`;
export const getQueryUrl = (server: string, query: string, period: TimeParams): string =>
`${server}/api/v1/query?query=${encodeURIComponent(query)}&start=${period.start}&end=${period.end}&step=${period.step}`;
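For illustration, a minimal standalone sketch of the updated helper (the server URL and timestamps below are made up; `nocache=1` is the query arg that asks VictoriaMetrics to bypass its response cache):

```ts
type TimeParams = {start: number; end: number; step: number};

// Same shape as the helper above; with nocache=true the URL gains "&nocache=1",
// which disables the rollup result cache -- useful when querying backfilled data.
const getQueryRangeUrl = (server: string, query: string, period: TimeParams, nocache: boolean): string =>
  `${server}/api/v1/query_range?query=${encodeURIComponent(query)}&start=${period.start}&end=${period.end}&step=${period.step}${nocache ? "&nocache=1" : ""}`;

console.log(getQueryRangeUrl("http://localhost:8428", "up", {start: 1634560000, end: 1634563600, step: 15}, true));
// http://localhost:8428/api/v1/query_range?query=up&start=1634560000&end=1634563600&step=15&nocache=1
```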


@@ -1,17 +1,6 @@
import React, {FC, useRef, useState} from "react";
import {
Accordion,
AccordionDetails,
AccordionSummary,
Box,
Grid,
IconButton,
TextField,
Typography,
FormControlLabel,
Tooltip,
Switch,
} from "@material-ui/core";
import { Accordion, AccordionDetails, AccordionSummary, Box, Grid, IconButton, TextField, Typography, FormControlLabel,
Tooltip, Switch } from "@material-ui/core";
import QueryEditor from "./QueryEditor";
import {TimeSelector} from "./TimeSelector";
import {useAppDispatch, useAppState} from "../../../state/common/StateContext";
@@ -20,122 +9,139 @@ import SecurityIcon from "@material-ui/icons/Security";
import {AuthDialog} from "./AuthDialog";
import PlayCircleOutlineIcon from "@material-ui/icons/PlayCircleOutline";
import Portal from "@material-ui/core/Portal";
import Popover from "@material-ui/core/Popover";
import SettingsIcon from "@material-ui/icons/Settings";
import {saveToStorage} from "../../../utils/storage";
import {useGraphDispatch, useGraphState} from "../../../state/graph/GraphStateContext";
import debounce from "lodash.debounce";
const QueryConfigurator: FC = () => {
const {serverUrl, query, time: {duration}} = useAppState();
const {serverUrl, query, queryHistory, time: {duration}, queryControls: {autocomplete, nocache}} = useAppState();
const dispatch = useAppDispatch();
const {queryControls: {autocomplete}} = useAppState();
const onChangeAutocomplete = () => {
dispatch({type: "TOGGLE_AUTOCOMPLETE"});
saveToStorage("AUTOCOMPLETE", !autocomplete);
};
const onChangeCache = () => {
dispatch({type: "NO_CACHE"});
saveToStorage("NO_CACHE", !nocache);
};
const { yaxis } = useGraphState();
const graphDispatch = useGraphDispatch();
const onChangeYaxisLimits = () => { graphDispatch({type: "TOGGLE_ENABLE_YAXIS_LIMITS"}); };
const setMinLimit = ({target: {value}}: {target: {value: string}}) => {
graphDispatch({type: "SET_YAXIS_LIMITS", payload: [+value, yaxis.limits.range[1]]});
};
const setMaxLimit = ({target: {value}}: {target: {value: string}}) => {
graphDispatch({type: "SET_YAXIS_LIMITS", payload: [yaxis.limits.range[0], +value]});
};
const [dialogOpen, setDialogOpen] = useState(false);
const [expanded, setExpanded] = useState(true);
const [popoverOpen, setPopoverOpen] = useState(false);
const refSettings = useRef<SVGGElement | any>(null);
const queryContainer = useRef<HTMLDivElement>(null);
const onSetDuration = (dur: string) => dispatch({type: "SET_DURATION", payload: dur});
const onRunQuery = () => dispatch({type: "RUN_QUERY"});
const onSetQuery = (query: string) => dispatch({type: "SET_QUERY", payload: query});
const onRunQuery = () => {
const { values } = queryHistory;
if (query === values[values.length - 1]) return;
dispatch({type: "RUN_QUERY"});
dispatch({type: "SET_QUERY_HISTORY_INDEX", payload: values.length});
dispatch({type: "SET_QUERY_HISTORY_VALUES", payload: [...values, query]});
};
const onSetQuery = (newQuery: string) => {
if (query === newQuery) return;
dispatch({type: "SET_QUERY", payload: newQuery});
};
const setHistoryIndex = (step: number) => {
const index = queryHistory.index + step;
if (index < -1 || index > queryHistory.values.length) return;
dispatch({type: "SET_QUERY_HISTORY_INDEX", payload: index});
onSetQuery(queryHistory.values[index] || "");
};
const onSetServer = ({target: {value}}: {target: {value: string}}) => {
dispatch({type: "SET_SERVER", payload: value});
};
return (
<>
<Accordion expanded={expanded} onChange={() => setExpanded(prev => !prev)}>
<AccordionSummary
expandIcon={<ExpandMoreIcon/>}
aria-controls="panel1a-content"
id="panel1a-header"
>
<Box mr={2}>
<Typography variant="h6" component="h2">Query Configuration</Typography>
</Box>
<Box flexGrow={1} onClick={e => e.stopPropagation()} onFocusCapture={e => e.stopPropagation()}>
<Portal disablePortal={!expanded} container={queryContainer.current}>
<QueryEditor server={serverUrl} query={query} oneLiner={!expanded} autocomplete={autocomplete}
runQuery={onRunQuery}
setQuery={onSetQuery}/>
</Portal>
</Box>
</AccordionSummary>
<AccordionDetails>
<Grid container spacing={2}>
<Grid item xs={12} md={6}>
<Box>
<Box py={2} display="flex" alignItems="center">
<TextField variant="outlined" fullWidth label="Server URL" value={serverUrl}
inputProps={{
style: {fontFamily: "Monospace"}
}}
onChange={onSetServer}/>
<Box ml={1}>
<Tooltip title="Execute Query">
<IconButton onClick={onRunQuery}>
<PlayCircleOutlineIcon />
</IconButton>
</Tooltip>
</Box>
<Box>
<Tooltip title="Request Auth Settings">
<IconButton onClick={() => setDialogOpen(true)}>
<SecurityIcon/>
</IconButton>
</Tooltip>
</Box>
</Box>
<Box py={2} display="flex">
<Box flexGrow={1} mr={2}>
{/* for portal QueryEditor */}
<div ref={queryContainer} />
</Box>
<div>
<Tooltip title="Query Editor Settings">
<IconButton onClick={() => setPopoverOpen(!popoverOpen)}>
<SettingsIcon ref={refSettings}/>
</IconButton>
</Tooltip>
<Popover open={popoverOpen} transformOrigin={{vertical: -20, horizontal: "left"}}
onClose={() => setPopoverOpen(false)}
anchorEl={refSettings.current}>
<Box p={2}>
{<FormControlLabel
control={<Switch size="small" checked={autocomplete} onChange={onChangeAutocomplete}/>}
label="Autocomplete"
/>}
</Box>
</Popover>
</div>
return <>
<Accordion expanded={expanded} onChange={() => setExpanded(prev => !prev)}>
<AccordionSummary
expandIcon={<ExpandMoreIcon/>}
aria-controls="panel1a-content"
id="panel1a-header"
>
<Box mr={2}><Typography variant="h6" component="h2">Query Configuration</Typography></Box>
<Box flexGrow={1} onClick={e => e.stopPropagation()} onFocusCapture={e => e.stopPropagation()}>
<Portal disablePortal={!expanded} container={queryContainer.current}>
<Box display="flex" alignItems="center">
<Box width="100%">
<QueryEditor server={serverUrl} query={query} oneLiner={!expanded} autocomplete={autocomplete}
queryHistory={queryHistory} setHistoryIndex={setHistoryIndex} runQuery={onRunQuery} setQuery={onSetQuery}/>
</Box>
<Tooltip title="Execute Query">
<IconButton onClick={onRunQuery}><PlayCircleOutlineIcon /></IconButton>
</Tooltip>
</Box>
</Portal>
</Box>
</AccordionSummary>
<AccordionDetails>
<Grid container spacing={2}>
<Grid item xs={12} md={6}>
<Box display="grid" gridGap={16}>
<Box display="flex" alignItems="center">
<TextField variant="outlined" fullWidth label="Server URL" value={serverUrl}
inputProps={{style: {fontFamily: "Monospace"}}}
onChange={onSetServer}/>
<Box>
<Tooltip title="Request Auth Settings">
<IconButton onClick={() => setDialogOpen(true)}><SecurityIcon/></IconButton>
</Tooltip>
</Box>
</Box>
</Grid>
<Grid item xs={8} md={6} >
<Box style={{
borderRadius: "4px",
borderColor: "#b9b9b9",
borderStyle: "solid",
borderWidth: "1px",
height: "calc(100% - 18px)",
marginTop: "16px"
}}>
<TimeSelector setDuration={onSetDuration} duration={duration}/>
</Box>
</Grid>
<Box flexGrow={1} ><div ref={queryContainer} />{/* for portal QueryEditor */}</Box>
</Box>
</Grid>
</AccordionDetails>
</Accordion>
<AuthDialog open={dialogOpen} onClose={() => setDialogOpen(false)}/>
</>
);
<Grid item xs={8} md={6} >
<Box style={{
borderRadius: "4px",
borderColor: "#b9b9b9",
borderStyle: "solid",
borderWidth: "1px",
height: "100%",}}>
<TimeSelector setDuration={onSetDuration} duration={duration}/>
</Box>
</Grid>
<Grid item xs={12}>
<Box px={1} display="flex" alignItems="center" minHeight={52}>
<Box><FormControlLabel
control={<Switch size="small" checked={autocomplete} onChange={onChangeAutocomplete}/>} label="Enable autocomplete"
/></Box>
<Box ml={4}><FormControlLabel
control={<Switch size="small" checked={!nocache} onChange={onChangeCache}/>} label="Enable cache"
/></Box>
<Box ml={4} display="flex" alignItems="center">
<FormControlLabel
control={<Switch size="small" checked={yaxis.limits.enable} onChange={onChangeYaxisLimits}/>}
label="fix the limits for y-axis"
/>
{yaxis.limits.enable && <Box display="grid" gridTemplateColumns="120px 120px" gridGap={10}>
<TextField label="Min" type="number" size="small" variant="outlined"
defaultValue={yaxis.limits.range[0]} onChange={debounce(setMinLimit, 750)}/>
<TextField label="Max" type="number" size="small" variant="outlined"
defaultValue={yaxis.limits.range[1]} onChange={debounce(setMaxLimit, 750)}/>
</Box>}
</Box>
</Box>
</Grid>
</Grid>
</AccordionDetails>
</Accordion>
<AuthDialog open={dialogOpen} onClose={() => setDialogOpen(false)}/>
</>;
};
export default QueryConfigurator;
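A condensed sketch of the history navigation wired up above, with the React state replaced by a plain object so the example is self-contained (query strings are illustrative):

```ts
// Mirrors setHistoryIndex above: the index may step one slot past either end
// of the history, which maps to an empty editor via `values[index] || ""`.
type QueryHistory = {index: number; values: string[]};

const history: QueryHistory = {index: 1, values: ["up", "rate(vm_http_requests_total[5m])"]};

const setHistoryIndex = (step: number): string | undefined => {
  const index = history.index + step;
  if (index < -1 || index > history.values.length) return; // ignore out-of-range steps
  history.index = index;
  return history.values[index] || ""; // past the ends -> empty query
};

console.log(setHistoryIndex(-1)); // "up"
console.log(setHistoryIndex(2));  // "" (one past the newest entry)
```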


@@ -4,19 +4,21 @@ import {defaultKeymap} from "@codemirror/next/commands";
import React, {FC, useEffect, useRef, useState} from "react";
import { PromQLExtension } from "codemirror-promql";
import { basicSetup } from "@codemirror/next/basic-setup";
import {isMacOs} from "../../../utils/detect-os";
import {QueryHistory} from "../../../state/common/reducer";
export interface QueryEditorProps {
setHistoryIndex: (step: number) => void;
setQuery: (query: string) => void;
runQuery: () => void;
query: string;
queryHistory: QueryHistory;
server: string;
oneLiner?: boolean;
autocomplete: boolean
}
const QueryEditor: FC<QueryEditorProps> = ({
query, setQuery, runQuery, server, oneLiner = false, autocomplete
query, queryHistory, setHistoryIndex, setQuery, runQuery, server, oneLiner = false, autocomplete
}) => {
const ref = useRef<HTMLDivElement>(null);
@@ -37,7 +39,6 @@ const QueryEditor: FC<QueryEditorProps> = ({
// update state on change of autocomplete server
useEffect(() => {
const promQL = new PromQLExtension();
promQL.activateCompletion(autocomplete);
promQL.setComplete({url: server});
@@ -55,24 +56,26 @@
keymap(defaultKeymap),
listenerExtension,
promQL.asExtension(),
keymap([
{
key: isMacOs() ? "Cmd-Enter" : "Ctrl-Enter",
run: (): boolean => {
runQuery();
return true;
},
},
]),
]
}));
}, [server, editorView, autocomplete, queryHistory]);
}, [server, editorView, autocomplete]);
const onKeyUp = (e: React.KeyboardEvent<HTMLDivElement>): void => {
const {key, ctrlKey, metaKey} = e;
const ctrlMetaKey = ctrlKey || metaKey;
if (key === "Enter" && ctrlMetaKey) {
runQuery();
} else if (key === "ArrowUp" && ctrlMetaKey) {
setHistoryIndex(-1);
} else if (key === "ArrowDown" && ctrlMetaKey) {
setHistoryIndex(1);
}
};
return (
<>
{/*Class one-line-scroll and other codemirror styles are declared in index.css*/}
<div ref={ref} className={oneLiner ? "one-line-scroll" : undefined}/>
<div ref={ref} className={oneLiner ? "one-line-scroll" : undefined} onKeyUp={onKeyUp}/>
</>
);
};


@@ -13,7 +13,7 @@ export const useFetchQuery = (): {
liveData?: InstantMetricResult[],
error?: string,
} => {
const {query, displayType, serverUrl, time: {period}} = useAppState();
const {query, displayType, serverUrl, time: {period}, queryControls: {nocache}} = useAppState();
const {basicData, bearerData, authMethod} = useAuthState();
@@ -43,7 +43,7 @@
const duration = (period.end - period.start)/2;
const doublePeriod = {...period, start: period.start - duration, end: period.end + duration};
return displayType === "chart"
? getQueryRangeUrl(serverUrl, query, doublePeriod)
? getQueryRangeUrl(serverUrl, query, doublePeriod, nocache)
: getQueryUrl(serverUrl, query, period);
} else {
setError("Please provide a valid URL");
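The widened fetch window above is easiest to see with numbers; a small sketch with illustrative values (the naming follows the hook above, presumably so panning and zooming have data on hand without an immediate refetch):

```ts
// useFetchQuery requests twice the visible range: half a range of padding
// on each side of the period selected in the UI.
const period = {start: 1000, end: 2000, step: 10};
const duration = (period.end - period.start) / 2; // 500
const doublePeriod = {...period, start: period.start - duration, end: period.end + duration};
console.log(doublePeriod); // { start: 500, end: 2500, step: 10 }
```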


@@ -39,7 +39,7 @@ const HomeLayout: FC = () => {
top: "40px",
opacity: ".4"
}}>
<Link color="inherit" href="https://github.com/VictoriaMetrics/vmui/issues/new" target="_blank">
<Link color="inherit" href="https://github.com/VictoriaMetrics/VictoriaMetrics/issues/new" target="_blank">
Create an issue
</Link>
</div>


@@ -1,7 +1,6 @@
import React, {FC} from "react";
import {MetricResult} from "../../../api/types";
import LineChart from "../../LineChart/LineChart";
import "../../../utils/chartjs-register-plugins";
export interface GraphViewProps {
data?: MetricResult[];


@@ -1,6 +1,4 @@
import React, {FC, useEffect, useMemo, useRef, useState} from "react";
import {getNameForMetric} from "../../utils/metric";
import "chartjs-adapter-date-fns";
import {useAppDispatch, useAppState} from "../../state/common/StateContext";
import {GraphViewProps} from "../Home/Views/GraphView";
import uPlot, {AlignedData as uPlotData, Options as uPlotOptions, Series as uPlotSeries} from "uplot";
@@ -8,76 +6,67 @@ import UplotReact from "uplot-react";
import "uplot/dist/uPlot.min.css";
import numeral from "numeral";
import "./legend.css";
import "./tooltip.css";
import {useGraphDispatch, useGraphState} from "../../state/graph/GraphStateContext";
import {getDataChart, getLimitsTimes, getLimitsYaxis, getSeries, setTooltip} from "../../utils/uPlot";
const LineChart: FC<GraphViewProps> = ({data = []}) => {
const dispatch = useAppDispatch();
const {time: {period}} = useAppState();
const [dataChart, setDataChart] = useState<uPlotData>();
const [series, setSeries] = useState<uPlotSeries[]>([]);
const [scale, setScale] = useState({min: period.start, max: period.end});
const refContainer = useRef<HTMLDivElement>(null);
const [isPanning, setIsPanning] = useState(false);
const [zoomPos, setZoomPos] = useState(0);
const tooltipIdx = {seriesIdx: 1, dataIdx: 0};
const tooltipOffset = {left: 0, top: 0};
const getColorByName = (str: string): string => {
let hash = 0;
for (let i = 0; i < str.length; i++) {
hash = str.charCodeAt(i) + ((hash << 5) - hash);
const {yaxis} = useGraphState();
const graphDispatch = useGraphDispatch();
const setStateLimits = (range: [number, number]) => {
if (!yaxis.limits.enable || (yaxis.limits.range.every(item => !item))) {
graphDispatch({type: "SET_YAXIS_LIMITS", payload: range});
}
let colour = "#";
for (let i = 0; i < 3; i++) {
const value = (hash >> (i * 8)) & 0xFF;
colour += ("00" + value.toString(16)).substr(-2);
}
return colour;
};
const times = useMemo(() => {
const allTimes = data.map(d => d.values.map(v => v[0])).flat();
const start = Math.min(...allTimes);
const end = Math.max(...allTimes);
const [start, end] = getLimitsTimes(data);
const output = [];
for (let i = start; i < end; i += period.step || 1) {
output.push(i);
}
for (let i = start; i < end; i += period.step || 1) { output.push(i); }
return output;
}, [data]);
useEffect(() => {
const values = data.map(d => times.map(t => {
const v = d.values.find(v => v[0] === t);
return v ? +v[1] : null;
}));
const seriesValues = data.map(d => ({
label: getNameForMetric(d),
width: 1,
font: "11px Arial",
stroke: getColorByName(getNameForMetric(d))}));
setSeries([{}, ...seriesValues]);
setDataChart([times, ...values]);
}, [data]);
const series = useMemo((): uPlotSeries[] => getSeries(data), [data]);
const dataChart = useMemo((): uPlotData => getDataChart(data, times), [data]);
const tooltip = document.createElement("div");
tooltip.className = "u-tooltip";
const onReadyChart = (u: uPlot) => {
const factor = 0.85;
tooltipOffset.left = parseFloat(u.over.style.left);
tooltipOffset.top = parseFloat(u.over.style.top);
u.root.querySelector(".u-wrap")?.appendChild(tooltip);
// wheel drag pan
u.over.addEventListener("mousedown", e => {
if (e.button !== 0) return;
setIsPanning(true);
e.preventDefault();
const left0 = e.clientX;
const scXMin0 = u.scales.x.min || 1;
const scXMax0 = u.scales.x.max || 1;
const xUnitsPerPx = u.posToVal(1, "x") - u.posToVal(0, "x");
const onmove = (e: MouseEvent) => {
e.preventDefault();
const dx = xUnitsPerPx * (e.clientX - left0);
const min = scXMin0 - dx;
const max = scXMax0 - dx;
const dx = (u.posToVal(1, "x") - u.posToVal(0, "x")) * (e.clientX - left0);
const min = (u.scales.x.min || 1) - dx;
const max = (u.scales.x.max || 1) - dx;
u.setScale("x", {min, max});
setScale({min, max});
};
const onup = () => {
setIsPanning(false);
document.removeEventListener("mousemove", onmove);
document.removeEventListener("mouseup", onup);
};
@@ -91,12 +80,11 @@ const LineChart: FC<GraphViewProps> = ({data = []}) => {
if (!e.ctrlKey && !e.metaKey) return;
e.preventDefault();
const {width} = u.over.getBoundingClientRect();
const {left = width/2} = u.cursor;
const leftPct = left/width;
const xVal = u.posToVal(left, "x");
if (u.cursor.left && u.cursor.left > 0) setZoomPos(u.cursor.left);
const xVal = u.posToVal(zoomPos, "x");
const oxRange = (u.scales.x.max || 0) - (u.scales.x.min || 0);
const nxRange = e.deltaY < 0 ? oxRange * factor : oxRange / factor;
const min = xVal - leftPct * nxRange;
const min = xVal - (zoomPos/width) * nxRange;
const max = min + nxRange;
u.batch(() => {
u.setScale("x", {min, max});
@@ -105,7 +93,25 @@
});
};
useEffect(() => {setScale({min: period.start, max: period.end});}, [period]);
const setCursor = (u: uPlot) => {
if (tooltipIdx.dataIdx === u.cursor.idx) return;
tooltipIdx.dataIdx = u.cursor.idx || 0;
if (tooltipIdx.seriesIdx && tooltipIdx.dataIdx) {
setTooltip({u, tooltipIdx, data, series, tooltip, tooltipOffset});
}
};
const seriesFocus = (u: uPlot, sidx: (number | null)) => {
if (tooltipIdx.seriesIdx === sidx) return;
tooltipIdx.seriesIdx = sidx || 0;
sidx && tooltipIdx.dataIdx
? setTooltip({u, tooltipIdx, data, series, tooltip, tooltipOffset})
: tooltip.style.display = "none";
};
useEffect(() => { setStateLimits(getLimitsYaxis(data)); }, [data]);
useEffect(() => { setScale({min: period.start, max: period.end}); }, [period]);
useEffect(() => {
const duration = (period.end - period.start)/3;
@@ -119,12 +125,8 @@ const LineChart: FC<GraphViewProps> = ({data = []}) => {
width: refContainer.current ? refContainer.current.offsetWidth : 400,
height: 500,
series: series,
plugins: [{
hooks: {
ready: onReadyChart
}
}],
cursor: {drag: {x: false, y: false}},
plugins: [{hooks: {ready: onReadyChart, setCursor, setSeries: seriesFocus}}],
cursor: {drag: {x: false, y: false}, focus: {prox: 30}},
axes: [
{space: 80},
{
@@ -133,14 +135,14 @@
values: (self, ticks) => ticks.map(n => n > 1000 ? numeral(n).format("0.0a") : n)
}
],
scales: {x: {range: () => [scale.min, scale.max]}}
scales: {
x: {range: () => [scale.min, scale.max]},
y: {range: (self, min, max) => yaxis.limits.enable ? yaxis.limits.range : [min, max]}
}
};
return <div ref={refContainer}>
{dataChart && <UplotReact
options={options}
data={dataChart}
/>}
return <div ref={refContainer} style={{pointerEvents: isPanning ? "none" : "auto"}}>
{dataChart && <UplotReact options={options} data={dataChart}/>}
</div>;
};
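For intuition, the ctrl/cmd+wheel zoom math above reduces to a few lines; a standalone sketch with illustrative numbers:

```ts
// The visible x-range shrinks (wheel up) or grows (wheel down) by a constant
// factor, anchored at the cursor so the value under the pointer stays put.
const factor = 0.85;
const width = 1000;   // plot width, px
const zoomPos = 250;  // cursor x within the plot, px
const xVal = 1250;    // data value under the cursor (u.posToVal(zoomPos, "x"))
const oxRange = 1000; // current visible x-range
const nxRange = oxRange * factor; // wheel up -> zoom in
const min = xVal - (zoomPos / width) * nxRange;
const max = min + nxRange;
console.log(min, max); // 1037.5 1887.5 -- the cursor stays at 25% of the view
```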


@@ -0,0 +1,35 @@
.u-tooltip {
position: absolute;
display: none;
grid-gap: 12px;
max-width: 300px;
padding: 8px;
border-radius: 4px;
background: rgba(57, 57, 57, 0.9);
color: #fff;
font-size: 10px;
line-height: 1.4em;
font-weight: 500;
word-wrap: break-word;
font-family: monospace;
pointer-events: none;
z-index: 100;
}
.u-tooltip-data {
display: flex;
flex-wrap: wrap;
align-items: center;
font-size: 11px;
}
.u-tooltip__info {
display: grid;
grid-gap: 4px;
}
.u-tooltip__marker {
width: 12px;
height: 12px;
margin-right: 4px;
}


@@ -1,12 +1,7 @@
/* eslint max-lines: 0 */
import {DisplayType} from "../../components/Home/Configurator/DisplayTypeSwitch";
import {TimeParams, TimePeriod} from "../../types";
import {
dateFromSeconds,
formatDateToLocal,
getDateNowUTC,
getDurationFromPeriod,
getTimeperiodForDuration
} from "../../utils/time";
import {dateFromSeconds, formatDateToLocal, getDateNowUTC, getDurationFromPeriod, getTimeperiodForDuration} from "../../utils/time";
import {getFromStorage} from "../../utils/storage";
import {getDefaultServer} from "../../utils/default-server-url";
import {getQueryStringValue} from "../../utils/query-string";
@@ -16,14 +11,21 @@ export interface TimeState {
period: TimeParams;
}
export interface QueryHistory {
index: number,
values: string[]
}
export interface AppState {
serverUrl: string;
displayType: DisplayType;
query: string;
time: TimeState;
queryHistory: QueryHistory,
queryControls: {
autoRefresh: boolean;
autocomplete: boolean
autocomplete: boolean,
nocache: boolean
}
}
@@ -31,6 +33,8 @@
| { type: "SET_DISPLAY_TYPE", payload: DisplayType }
| { type: "SET_SERVER", payload: string }
| { type: "SET_QUERY", payload: string }
| { type: "SET_QUERY_HISTORY_INDEX", payload: number }
| { type: "SET_QUERY_HISTORY_VALUES", payload: string[] }
| { type: "SET_DURATION", payload: string }
| { type: "SET_UNTIL", payload: Date }
| { type: "SET_PERIOD", payload: TimePeriod }
@@ -38,21 +42,25 @@
| { type: "RUN_QUERY_TO_NOW"}
| { type: "TOGGLE_AUTOREFRESH"}
| { type: "TOGGLE_AUTOCOMPLETE"}
| { type: "NO_CACHE"}
const duration = getQueryStringValue("g0.range_input", "1h") as string;
const endInput = formatDateToLocal(getQueryStringValue("g0.end_input", getDateNowUTC()) as Date);
const query = getQueryStringValue("g0.expr", getFromStorage("LAST_QUERY") as string || "\n") as string;
export const initialState: AppState = {
serverUrl: getDefaultServer(),
displayType: "chart",
query: getQueryStringValue("g0.expr", getFromStorage("LAST_QUERY") as string || "\n") as string, // demo_memory_usage_bytes
query: query, // demo_memory_usage_bytes
queryHistory: { index: 0, values: [query] },
time: {
duration,
period: getTimeperiodForDuration(duration, new Date(endInput))
},
queryControls: {
autoRefresh: false,
autocomplete: getFromStorage("AUTOCOMPLETE") as boolean || false
autocomplete: getFromStorage("AUTOCOMPLETE") as boolean || false,
nocache: getFromStorage("NO_CACHE") as boolean || false,
}
};
@@ -73,6 +81,22 @@
...state,
query: action.payload
};
case "SET_QUERY_HISTORY_INDEX":
return {
...state,
queryHistory: {
...state.queryHistory,
index: action.payload
}
};
case "SET_QUERY_HISTORY_VALUES":
return {
...state,
queryHistory: {
...state.queryHistory,
values: action.payload
}
};
case "SET_DURATION":
return {
...state,
@@ -121,6 +145,14 @@
autocomplete: !state.queryControls.autocomplete
}
};
case "NO_CACHE":
return {
...state,
queryControls: {
...state.queryControls,
nocache: !state.queryControls.nocache
}
};
case "RUN_QUERY":
return {
...state,
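A usage sketch for the new `NO_CACHE` action, assuming the `reducer` and `initialState` exported from this file (note the module also reads query-string and localStorage state at import time, so treat this as browser-context pseudocode):

```ts
import {initialState, reducer} from "./reducer"; // path as in this diff

// Each NO_CACHE dispatch flips queryControls.nocache; useFetchQuery then turns
// the flag into the "&nocache=1" query_range URL parameter.
const next = reducer(initialState, {type: "NO_CACHE"});
console.log(next.queryControls.nocache); // true, assuming nothing was saved in localStorage
```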


@@ -0,0 +1,25 @@
import React, {createContext, Dispatch, FC, useContext, useMemo, useReducer} from "react";
import {GraphAction, GraphState, initialGraphState, reducer} from "./reducer";
type GraphStateContextType = { state: GraphState, dispatch: Dispatch<GraphAction> };
export const GraphStateContext = createContext<GraphStateContextType>({} as GraphStateContextType);
export const useGraphState = (): GraphState => useContext(GraphStateContext).state;
export const useGraphDispatch = (): Dispatch<GraphAction> => useContext(GraphStateContext).dispatch;
export const GraphStateProvider: FC = ({children}) => {
const [state, dispatch] = useReducer(reducer, initialGraphState);
const contextValue = useMemo(() => {
return { state, dispatch };
}, [state, dispatch]);
return <GraphStateContext.Provider value={contextValue}>
{children}
</GraphStateContext.Provider>;
};


@@ -0,0 +1,49 @@
export interface YaxisState {
limits: {
enable: boolean,
range: [number, number]
}
}
export interface GraphState {
yaxis: YaxisState
}
export type GraphAction =
| { type: "TOGGLE_ENABLE_YAXIS_LIMITS" }
| { type: "SET_YAXIS_LIMITS", payload: [number, number] }
export const initialGraphState: GraphState = {
yaxis: {
limits: {enable: false, range: [0, 0]}
}
};
export function reducer(state: GraphState, action: GraphAction): GraphState {
switch (action.type) {
case "TOGGLE_ENABLE_YAXIS_LIMITS":
return {
...state,
yaxis: {
...state.yaxis,
limits: {
...state.yaxis.limits,
enable: !state.yaxis.limits.enable
}
}
};
case "SET_YAXIS_LIMITS":
return {
...state,
yaxis: {
...state.yaxis,
limits: {
...state.yaxis.limits,
range: action.payload
}
}
};
default:
throw new Error();
}
}
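A usage sketch for this graph reducer, mirroring what the y-axis switches in QueryConfigurator dispatch:

```ts
import {initialGraphState, reducer} from "./reducer"; // the file shown above

// Enable fixed y-axis limits, then set an explicit [min, max] range.
const s1 = reducer(initialGraphState, {type: "TOGGLE_ENABLE_YAXIS_LIMITS"});
const s2 = reducer(s1, {type: "SET_YAXIS_LIMITS", payload: [0, 100]});
console.log(s2.yaxis.limits); // { enable: true, range: [0, 100] }
```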


@@ -1,4 +0,0 @@
import {Chart} from "chart.js";
import zoomPlugin from "chartjs-plugin-zoom";
Chart.register(zoomPlugin);


@@ -0,0 +1,12 @@
export const getColorFromString = (str: string): string => {
let hash = 0;
for (let i = 0; i < str.length; i++) {
hash = str.charCodeAt(i) + ((hash << 5) - hash);
}
let colour = "#";
for (let i = 0; i < 3; i++) {
const value = (hash >> (i * 8)) & 0xFF;
colour += ("00" + value.toString(16)).substr(-2);
}
return colour;
};
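A quick usage note: the hash is deterministic, so a given series label maps to the same color on every render (the concrete hex value below is made up):

```ts
import {getColorFromString} from "./color"; // imported as uPlot.ts does in this diff

const c1 = getColorFromString("vm_rows");
const c2 = getColorFromString("vm_rows");
console.log(c1 === c2); // true -- stable colors across refreshes
console.log(c1);        // e.g. "#1f8a70" (deterministic; the value shown here is illustrative)
```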


@@ -2,7 +2,8 @@ export type StorageKeys = "LAST_QUERY"
| "BASIC_AUTH_DATA"
| "BEARER_AUTH_DATA"
| "AUTH_TYPE"
| "AUTOCOMPLETE";
| "AUTOCOMPLETE"
| "NO_CACHE"
export const saveToStorage = (key: StorageKeys, value: string | boolean | Record<string, unknown>): void => {
if (value) {


@@ -0,0 +1,64 @@
import uPlot, {AlignedData, Series} from "uplot";
import {getColorFromString} from "./color";
import dayjs from "dayjs";
import {MetricResult} from "../api/types";
import {getNameForMetric} from "./metric";
interface SetupTooltip {
u: uPlot,
data: MetricResult[],
series: Series[],
tooltip: HTMLDivElement,
tooltipOffset: {left: number, top: number},
tooltipIdx: {seriesIdx: number, dataIdx: number}
}
export const setTooltip = ({ u, tooltipIdx, data, series, tooltip, tooltipOffset }: SetupTooltip) : void => {
const {seriesIdx, dataIdx} = tooltipIdx;
const dataSeries = u.data[seriesIdx][dataIdx];
const dataTime = u.data[0][dataIdx];
const metric = data[seriesIdx - 1]?.metric || {};
const color = getColorFromString(series[seriesIdx].label || "");
const {width, height} = u.over.getBoundingClientRect();
const top = u.valToPos((dataSeries || 0), "y");
const lft = u.valToPos(dataTime, "x");
const {width: tooltipWidth, height: tooltipHeight} = tooltip.getBoundingClientRect();
const overflowX = lft + tooltipWidth >= width;
const overflowY = top + tooltipHeight >= height;
tooltip.style.display = "grid";
tooltip.style.top = `${tooltipOffset.top + top + 10 - (overflowY ? tooltipHeight + 10 : 0)}px`;
tooltip.style.left = `${tooltipOffset.left + lft + 10 - (overflowX ? tooltipWidth + 20 : 0)}px`;
const date = dayjs(new Date(dataTime * 1000)).format("YYYY-MM-DD HH:mm:ss:SSS (Z)");
const info = Object.keys(metric).filter(k => k !== "__name__").map(k => `<div><b>${k}</b>: ${metric[k]}</div>`).join("");
const marker = `<div class="u-tooltip__marker" style="background: ${color}"></div>`;
tooltip.innerHTML = `<div>${date}</div>
<div class="u-tooltip-data">
${marker}${metric.__name__ || ""}: <b>${dataSeries}</b>
</div>
<div class="u-tooltip__info">${info}</div>`;
};
export const getSeries = (data: MetricResult[]): Series[] => [{}, ...data.map(d => ({
label: getNameForMetric(d),
width: 1.5,
stroke: getColorFromString(getNameForMetric(d))
}))];
export const getLimitsTimes = (data: MetricResult[]): [number, number] => {
const allTimes = data.map(d => d.values.map(v => v[0])).flat().sort((a,b) => a-b);
return [allTimes[0], allTimes[allTimes.length - 1]];
};
export const getLimitsYaxis = (data: MetricResult[]): [number, number] => {
const allValues = data.map(d => d.values.map(v => +v[1])).flat().sort((a,b) => a-b);
return [allValues[0], allValues[allValues.length - 1]];
};
export const getDataChart = (data: MetricResult[], times: number[]): AlignedData => {
return [times, ...data.map(d => times.map(t => {
const v = d.values.find(v => v[0] === t);
return v ? +v[1] : null;
}))];
};
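How `getDataChart` aligns series for uPlot is worth a tiny worked example: timestamps missing from a series become `null` gaps on the shared time axis (data below is illustrative, and the helper is inlined so the sketch is self-contained):

```ts
type MetricResult = {metric: {[key: string]: string}; values: [number, string][]};

// Same logic as getDataChart above.
const getDataChart = (data: MetricResult[], times: number[]) =>
  [times, ...data.map(d => times.map(t => {
    const v = d.values.find(v => v[0] === t);
    return v ? +v[1] : null;
  }))];

const series: MetricResult[] = [
  {metric: {__name__: "a"}, values: [[10, "1"], [20, "2"]]},
  {metric: {__name__: "b"}, values: [[20, "5"]]},
];
console.log(getDataChart(series, [10, 20])); // [[10, 20], [1, 2], [null, 5]]
```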


@@ -5,7 +5,7 @@
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "8.1.2"
"version": "8.2.0"
},
{
"type": "panel",
@@ -59,10 +59,11 @@
},
"description": "Overview for VictoriaMetrics vmagent v1.64.0 or higher",
"editable": true,
"fiscalYearStartMonth": 0,
"gnetId": null,
"graphTooltip": 1,
"id": null,
"iteration": 1630485687361,
"iteration": 1634561115384,
"links": [
{
"icon": "doc",
@@ -90,6 +91,7 @@
"url": "https://github.com/VictoriaMetrics/VictoriaMetrics/releases"
}
],
"liveNow": false,
"panels": [
{
"collapsed": false,
@@ -145,7 +147,7 @@
"text": {},
"textMode": "auto"
},
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"targets": [
{
"expr": "sum(vm_promscrape_targets{job=~\"$job\", instance=~\"$instance\", status=\"up\"})",
@@ -209,7 +211,7 @@
"text": {},
"textMode": "auto"
},
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"targets": [
{
"expr": "sum(vm_promscrape_targets{job=~\"$job\", instance=~\"$instance\", status=\"down\"})",
@@ -276,7 +278,7 @@
"text": {},
"textMode": "auto"
},
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"targets": [
{
"expr": "sum(increase(vm_log_messages_total{job=~\"$job\", instance=~\"$instance\", level!=\"info\"}[30m]))",
@@ -335,7 +337,7 @@
"text": {},
"textMode": "auto"
},
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"targets": [
{
"expr": "sum(vm_persistentqueue_bytes_pending{job=~\"$job\", instance=~\"$instance\"})",
@@ -485,7 +487,7 @@
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -588,7 +590,7 @@
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -701,7 +703,7 @@
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -801,7 +803,7 @@
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -917,7 +919,7 @@
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -1024,7 +1026,7 @@
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -1130,7 +1132,7 @@
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -1260,7 +1262,7 @@
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -1367,7 +1369,7 @@
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -1468,7 +1470,7 @@
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -2602,7 +2604,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 4
"y": 51
},
"hiddenSeries": false,
"id": 60,
@ -2625,7 +2627,7 @@
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -2703,7 +2705,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 4
"y": 51
},
"hiddenSeries": false,
"id": 66,
@ -2726,7 +2728,7 @@
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -2803,7 +2805,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 12
"y": 59
},
"hiddenSeries": false,
"id": 61,
@ -2826,7 +2828,7 @@
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -2903,7 +2905,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 12
"y": 59
},
"hiddenSeries": false,
"id": 65,
@ -2926,7 +2928,7 @@
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -3003,7 +3005,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 20
"y": 67
},
"heatmap": {},
"hideZeroBuckets": false,
@@ -3069,7 +3071,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 20
"y": 67
},
"hiddenSeries": false,
"id": 84,
@ -3092,7 +3094,7 @@
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.1.2",
"pluginVersion": "8.2.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -3181,7 +3183,7 @@
"h": 8,
"w": 12,
"x": 0,
"y": 28
"y": 75
},
"heatmap": {},
"hideZeroBuckets": false,
@@ -3247,7 +3249,7 @@
"h": 8,
"w": 12,
"x": 12,
"y": 28
"y": 75
},
"heatmap": {},
"hideZeroBuckets": false,
@@ -3292,6 +3294,228 @@
"yBucketBound": "auto",
"yBucketNumber": null,
"yBucketSize": null
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "$ds",
"description": "Shows the current limit usage of unique series over an hourly period. Vmagent will start to drop series once the limit is reached.\n\nPlease note, panel will be blank if `remoteWrite.maxHourlySeries` is not set.",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 83
},
"hiddenSeries": false,
"id": 88,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": false,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.2.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"exemplar": true,
"expr": "(vmagent_hourly_series_limit_current_series{job=~\"$job\", instance=~\"$instance\"} / vmagent_hourly_series_limit_max_series{job=~\"$job\", instance=~\"$instance\"}) * 100",
"interval": "",
"legendFormat": "current limit usage",
"refId": "A"
},
{
"exemplar": true,
"expr": "vmagent_daily_series_limit_max_series{job=~\"$job\", instance=~\"$instance\"}",
"hide": true,
"interval": "",
"legendFormat": "limit",
"refId": "B"
}
],
"thresholds": [
{
"$$hashKey": "object:234",
"colorMode": "critical",
"fill": true,
"line": true,
"op": "gt",
"value": 90,
"yaxis": "left"
}
],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Hourly series limit",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:216",
"format": "percent",
"label": null,
"logBase": 1,
"max": "100",
"min": null,
"show": true
},
{
"$$hashKey": "object:217",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "$ds",
"description": "Shows the current limit usage of unique series over a daily period. Vmagent will start to drop series once the limit is reached.\n\nPlease note, panel will be blank if `remoteWrite.maxDailySeries` is not set.",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 83
},
"hiddenSeries": false,
"id": 90,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": false,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.2.0",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"exemplar": true,
"expr": "(vmagent_daily_series_limit_current_series{job=~\"$job\", instance=~\"$instance\"} / vmagent_daily_series_limit_max_series{job=~\"$job\", instance=~\"$instance\"}) * 100",
"interval": "",
"legendFormat": "current limit usage",
"refId": "A"
},
{
"exemplar": true,
"expr": "vmagent_daily_series_limit_max_series{job=~\"$job\", instance=~\"$instance\"}",
"hide": true,
"interval": "",
"legendFormat": "limit",
"refId": "B"
}
],
"thresholds": [
{
"$$hashKey": "object:234",
"colorMode": "critical",
"fill": true,
"line": true,
"op": "gt",
"value": 90,
"yaxis": "left"
}
],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Daily series limit",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:216",
"format": "percent",
"label": null,
"logBase": 1,
"max": "100",
"min": null,
"show": true
},
{
"$$hashKey": "object:217",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"title": "Remote write",
@@ -4073,7 +4297,7 @@
}
],
"refresh": false,
"schemaVersion": 30,
"schemaVersion": 31,
"style": "dark",
"tags": [
"vmagent",
@@ -4199,5 +4423,5 @@
"timezone": "",
"title": "vmagent",
"uid": "G7Z9GzMGz",
"version": 1
"version": 3
}


@@ -261,3 +261,22 @@ groups:
This usually means that `-remoteWrite.queues` command-line flag must be increased in order to increase
the number of connections per each remote storage."
- alert: SeriesLimitHourReached
expr: (vmagent_hourly_series_limit_current_series / vmagent_hourly_series_limit_max_series) > 0.9
labels:
severity: critical
annotations:
dashboard: "http://localhost:3000/d/G7Z9GzMGz?viewPanel=88&var-instance={{ $labels.instance }}"
summary: "Instance {{ $labels.instance }} reached 90% of the limit"
description: "Max series limit set via -remoteWrite.maxHourlySeries flag is close to reaching the max value.
Then samples for new time series will be dropped instead of sending them to remote storage systems."
- alert: SeriesLimitDayReached
expr: (vmagent_daily_series_limit_current_series / vmagent_daily_series_limit_max_series) > 0.9
labels:
severity: critical
annotations:
dashboard: "http://localhost:3000/d/G7Z9GzMGz?viewPanel=90&var-instance={{ $labels.instance }}"
summary: "Instance {{ $labels.instance }} reached 90% of the limit"
description: "Max series limit set via -remoteWrite.maxDailySeries flag is close to reaching the max value.
Then samples for new time series will be dropped instead of sending them to remote storage systems."
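For intuition, the ratio these alerts (and the matching dashboard panels) compute, sketched in TypeScript to match the other examples in this document; the metric values below are illustrative:

```ts
// The alert fires when current/max exceeds 0.9; the dashboard plots the same
// ratio scaled to percent.
const currentSeries = 950_000;  // vmagent_hourly_series_limit_current_series
const maxSeries = 1_000_000;    // vmagent_hourly_series_limit_max_series
const usage = currentSeries / maxSeries;
console.log(usage > 0.9, `${(usage * 100).toFixed(1)}%`); // true "95.0%"
```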


@@ -39,7 +39,7 @@ services:
restart: always
grafana:
container_name: grafana
image: grafana/grafana:8.1.2
image: grafana/grafana:8.2.2
depends_on:
- "victoriametrics"
ports:


@@ -80,6 +80,7 @@ See also [case studies](https://docs.victoriametrics.com/CaseStudies.html).
* [Speeding up backups for big time series databases](https://valyala.medium.com/speeding-up-backups-for-big-time-series-databases-533c1a927883)
* [Improving histogram usability for Prometheus and Grafana](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350)
* [Why irate from Prometheus doesn't capture spikes](https://valyala.medium.com/why-irate-from-prometheus-doesnt-capture-spikes-45f9896d7832)
* [VictoriaMetrics: PromQL compliance](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e)
### Tutorials, guides and how-to articles


@@ -7,6 +7,33 @@ sort: 15
## tip
## [v1.68.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.68.0)
* FEATURE: vmagent: expose `-promscrape.config` contents at `/config` page as Prometheus does. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1695).
* FEATURE: vmagent: add `show original labels` button per each scrape target displayed at `http://vmagent:8429/targets` page. This should improve debuggability for service discovery and relabeling issues similar to [this one](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1664). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1698).
* FEATURE: vmagent: shard targets among cluster nodes after the relabeling is applied. This should guarantee that targets with the same set of labels go to the same `vmagent` node in the cluster. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1687).
* FEATURE: vmagent: automatically switch to [stream parsing mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode) if the response from the given target exceeds the command-line flag value `-promscrape.minResponseSizeForStreamParse`. This should reduce memory usage when `vmagent` scrapes targets with non-uniform response sizes (this is the case in Kubernetes monitoring).
* FEATURE: vmagent: send Prometheus-like staleness marks in [stream parsing mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode). Previously staleness marks weren't sent in stream parsing mode. See [these docs](https://docs.victoriametrics.com/vmagent.html#prometheus-staleness-markers) for details.
* FEATURE: vmagent: properly calculate `scrape_series_added` metric for targets in [stream parsing mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode). Previously it was set to 0 in stream parsing mode. See [more details about this metric](https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series).
* FEATURE: vmagent: expose `promscrape_series_limit_max_series` and `promscrape_series_limit_current_series` metrics at `http://vmagent:8429/metrics` for scrape targets with the [enabled series limiter](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter).
* FEATURE: vmagent: return error if `sample_limit` or `series_limit` options are set when [stream parsing mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode) is enabled, since these limits cannot be applied in stream parsing mode.
* FEATURE: vmalert: add `-remoteRead.disablePathAppend` command-line flag, which allows specifying the full `-remoteRead.url`. If `-remoteRead.disablePathAppend` is set, then `vmalert` doesn't add `/api/v1/query` suffix to `-remoteRead.url`.
* FEATURE: add trigonometric functions, which are going to be added in [Prometheus 2.31](https://github.com/prometheus/prometheus/pull/9239): [acosh](https://docs.victoriametrics.com/MetricsQL.html#acosh), [asinh](https://docs.victoriametrics.com/MetricsQL.html#asinh), [atan](https://docs.victoriametrics.com/MetricsQL.html#atan), [atanh](https://docs.victoriametrics.com/MetricsQL.html#atanh), [cosh](https://docs.victoriametrics.com/MetricsQL.html#cosh), [deg](https://docs.victoriametrics.com/MetricsQL.html#deg), [rad](https://docs.victoriametrics.com/MetricsQL.html#rad), [sinh](https://docs.victoriametrics.com/MetricsQL.html#sinh), [tan](https://docs.victoriametrics.com/MetricsQL.html#tan), [tanh](https://docs.victoriametrics.com/MetricsQL.html#tanh). Also add `atan2` binary operator. See [this pull request](https://github.com/prometheus/prometheus/pull/9248).
* FEATURE: consistently return the same set of time series from [limitk](https://docs.victoriametrics.com/MetricsQL.html#limitk) function. This improves the usability of periodically refreshed graphs.
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): various UX improvements. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1711) and [these docs](https://docs.victoriametrics.com/#vmui).
* FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): add ability to specify HTTP headers, which will be sent in requests to backends. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1736).
* FEATURE: add `/flags` page to all the VictoriaMetrics components. This page contains command-line flags passed to the component.
* FEATURE: allow using tab separators additionally to whitespace separators when [ingesting data in Graphite plaintext protocol](https://docs.victoriametrics.com/#how-to-send-data-from-graphite-compatible-agents-such-as-statsd). Such separators are [supported by Carbon-c-relay](https://github.com/grobian/carbon-c-relay/commit/f3ffe6cc2b52b07d14acbda649ad3fd6babdd528).
* BUGFIX: vmstorage: fix `unaligned 64-bit atomic operation` panic on 32-bit architectures (arm and 386). The panic has been introduced in v1.67.0.
* BUGFIX: vmalert, vmauth: prevent frequent closing of TCP connections established to backends under high load. This should reduce the number of TCP sockets in `TIME_WAIT` state at `vmalert` and `vmauth` under high load. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1704).
* BUGFIX: vmalert: correctly calculate alert ID, including extra labels. Previously the ID for an alert entity was generated without the alertname or groupname. This led to collisions when multiple alerting rules within the same group produced the same labelset. E.g. expr: `sum(metric1) by (job) > 0` and expr: `sum(metric2) by (job) > 0` could result in the same labelset `job: "job"`. The bugfix adds all extra labels right after receiving the response from the datasource. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1734).
* BUGFIX: vmalert: fix links in [Web UI](https://docs.victoriametrics.com/vmalert.html#web). See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1717).
* BUGFIX: vmagent: set `honor_timestamps: true` by default in [scrape configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) if this option isn't set explicitly. This aligns the behaviour with Prometheus.
* BUGFIX: vmagent: group scrape targets by the original job names at `http://vmagent:8429/targets` page like Prometheus does. Previously they were grouped by the job name after relabeling, which may result in unexpected empty target groups. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1707).
* BUGFIX: [vmctl](https://docs.victoriametrics.com/vmctl.html): fix importing boolean fields from InfluxDB line protocol. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1709).
## [v1.67.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.67.0)
* FEATURE: add ability to accept metrics from [DataDog agent](https://docs.datadoghq.com/agent/) and [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-datadog-agent). This option simplifies the migration path from DataDog to VictoriaMetrics. See also [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/206).
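  For example, a single sample can be pushed to the DataDog-compatible endpoint of a single-node VictoriaMetrics instance with `curl`; the metric name, timestamp, value and host below are illustrative:

  ```bash
  # Submit one DataDog-style sample; points are [epoch_seconds, value] pairs
  echo '{"series":[{"metric":"system.load.1","points":[[1634918400, 0.5]],"host":"test-host"}]}' \
    | curl -sS -X POST -H 'Content-Type: application/json' --data-binary @- \
        http://localhost:8428/datadog/api/v1/series
  ```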

View file

@ -7,6 +7,7 @@ sort: 11
Below please find public case studies and talks from VictoriaMetrics users. You can also join our [community Slack channel](https://slack.victoriametrics.com/)
where you can chat with VictoriaMetrics users to get additional references, reviews and case studies.
* [AbiosGaming](#abiosgaming)
* [adidas](#adidas)
* [Adsterra](#adsterra)
* [ARNES](#arnes)
@ -14,12 +15,16 @@ where you can chat with VictoriaMetrics users to get additional references, revi
* [CERN](#cern)
* [COLOPL](#colopl)
* [Dreamteam](#dreamteam)
* [Fly.io](#flyio)
* [German Research Center for Artificial Intelligence](#german-research-center-for-artificial-intelligence)
* [Grammarly](#grammarly)
* [Groove X](#groove-x)
* [Idealo.de](#idealode)
* [MHI Vestas Offshore Wind](#mhi-vestas-offshore-wind)
* [Percona](#percona)
* [Razorpay](#razorpay)
* [Sensedia](#sensedia)
* [Smarkets](#smarkets)
* [Synthesio](#synthesio)
* [Wedos.com](#wedoscom)
* [Wix.com](#wixcom)
@ -29,6 +34,25 @@ where you can chat with VictoriaMetrics users to get additional references, revi
You can also read [articles about VictoriaMetrics from our users](https://docs.victoriametrics.com/Articles.html#third-party-articles-and-slides-about-victoriametrics).
## AbiosGaming
[AbiosGaming](https://abiosgaming.com/) provides industry-leading esports data and technology across the globe.
> At Abios, we are running Grafana and Prometheus for our operational insights. We are collecting all sorts of operational metrics such as request latency, active WebSocket connections, and cache statistics to determine if things are working as we expect them to.
> Prometheus explicitly recommends their users not to use high cardinality labels for their time-series data, which is exactly what we want to do. Prometheus is thus a poor solution to keep using. However, since we were already using Prometheus, we needed an alternative solution to be fully compatible with the Prometheus query language.
> The options we decided to try were TimescaleDB together with Promscale to act as a remote write intermediary and VictoriaMetrics. In both cases we still used Prometheus Operator to launch Prometheus instances to scrape metrics and send them to the respective storage layers.
> The biggest difference for our day-to-day operation is perhaps that VictoriaMetrics does not have a Write-Ahead log. The WAL has caused us trouble when Prometheus has experienced issues and starts to run out of RAM when replaying the WAL, thus entering a crash-loop.
> All in all, we are quite impressed with VictoriaMetrics. Not only is the core time-series database well designed, easy to deploy and operate, and performant but the entire ecosystem around it seems to have been given an equal amount of love. There are utilities for things such as taking snapshots (backups) and storing to S3 (and reloading from S3), a Kubernetes Operator, and authentication proxies. It also provides a cluster deployment option if we were to scale up to those numbers.
> From a usability point of view, VictoriaMetrics is the clear winner. Neither Prometheus nor TimescaleDB managed to do any kind of aggregations on our high cardinality metrics, whereas VictoriaMetrics does.
See [the full article](https://abiosgaming.com/press/high-cardinality-aggregations/).
## adidas
See our [slides](https://promcon.io/2019-munich/slides/remote-write-storage-wars.pdf) and [video](https://youtu.be/OsH6gPdxR4s)
@ -224,6 +248,19 @@ VictoriaMetrics in production environment runs on 2 M5 EC2 instances in "HA" mod
2 Prometheus instances are writing to both VMs, with 2 [Promxy](https://github.com/jacksontj/promxy) replicas
as the load balancer for reads.
## Fly.io
[Fly.io](https://fly.io/about/) is a platform for running full stack apps and databases close to your users.
> Victoria Metrics (“Vicky”), in a clustered configuration, is our metrics database. We run a cluster of fairly big Vicky hosts.
> Like everyone else, we started with a simple Prometheus server. That worked until it didn't. We spent some time scaling it with Thanos, and Thanos was a lot, as far as ops hassle goes. We'd dabbled with Vicky just as a long-term storage engine for vanilla Prometheus, with promxy set up to deduplicate metrics.
> Vicky grew into a more ambitious offering, and added its own Prometheus scraper; we adopted it and scaled it as far as we reasonably could in a single-node configuration. Scaling requirements ultimately pushed us into a clustered deployment; we run an HA cluster (fronted by haproxy). Current Vicky has a really straightforward multi-tenant API — it's easy to namespace metrics for customers — and it chugs along for us without too much minding.
See [the full post](https://fly.io/blog/measuring-fly/).
## German Research Center for Artificial Intelligence
[German Research Center for Artificial Intelligence](https://en.wikipedia.org/wiki/German_Research_Centre_for_Artificial_Intelligence) (DFKI) is one of the world's largest nonprofit contract research institutes for software technology based on artificial intelligence (AI) methods. DFKI was founded in 1988, and has facilities in the German cities of Kaiserslautern, Saarbrücken, Bremen and Berlin.
@ -352,6 +389,34 @@ Numbers with current, limited roll out:
- Retention period: 3 years
## Percona
[Percona](https://www.percona.com/) is a leader in providing best-of-breed enterprise-class support, consulting, managed services, training and software for MySQL®, MariaDB®, MongoDB®, PostgreSQL® and other open source databases in on-premises and cloud environments.
Percona migrated from Prometheus to VictoriaMetrics in the [Percona Monitoring and Management](https://www.percona.com/software/database-tools/percona-monitoring-and-management) product. This allowed [reducing resource usage](https://www.percona.com/blog/2020/12/23/observations-on-better-resource-usage-with-percona-monitoring-and-management-v2-12-0/) and [getting rid of complex firewall setup](https://www.percona.com/blog/2020/12/01/foiled-by-the-firewall-a-tale-of-transition-from-prometheus-to-victoriametrics/), while [improving user experience](https://www.percona.com/blog/2020/02/28/better-prometheus-rate-function-with-victoriametrics/).
## Razorpay
[Razorpay](https://razorpay.com/) aims to revolutionize money management for online businesses by providing clean, developer-friendly APIs and hassle-free integration.
> As a fintech organization, we move billions of dollars every month. Our customers and merchants have entrusted us with a paramount responsibility. To handle our ever-growing business, building a robust observability stack is not just “nice to have”, but absolutely essential. And all of this starts with better monitoring and metrics.
> We executed a variety of POCs on various solutions and finally arrived at the following technologies: M3DB, Thanos, Cortex and VictoriaMetrics. The clear winner was VictoriaMetrics.
> The following are some of the basic observations we derived from Victoria Metrics:
> * Simple components, each horizontally scalable.
> * Clear separation between writes and reads.
> * Runs from default configurations, with no extra frills.
> * Default retention starts with 1 month.
> * Storage, ingestion, and reads can be easily scaled.
> * High Compression store ~ 70% more compression.
> * Currently running in production with commodity hardware with a good mix of spot instances.
> * Successfully ran some of the worst Grafana dashboards/queries that have historically failed to run.
See [the full article](https://engineering.razorpay.com/scaling-to-trillions-of-metric-data-points-f569a5b654f2).
## Sensedia
[Sensedia](https://www.sensedia.com) is a leading integration solutions provider with more than 120 enterprise clients across a range of sectors. Its world-class portfolio includes: an API Management Platform, Adaptive Governance, Events Hub, Service Mesh, Cloud Connectors and Strategic Professional Services' teams.
@ -377,6 +442,22 @@ Numbers:
- Query response time (99th percentile): 500ms
## Smarkets
[Smarkets](https://smarkets.com/) simplifies peer-to-peer trading on sporting and political events.
> We always wanted our developers to have out-of-the-box monitoring available for any application or service. Before we adopted Kubernetes this was achieved either with Prometheus metrics, or with statsd being sent over to the underlying host and then converted into Prometheus metrics. As we expanded our Kubernetes adoption and started to split clusters, we also wanted developers to be able to expose metrics directly to Prometheus by annotating services. Those metrics were then only available inside the cluster so they couldn't be scraped globally.
> We considered three different solutions to improve our architecture:
> * Prometheus + Cortex
> * Prometheus + Thanos Receive
> * Prometheus + Victoria Metrics
> We selected Victoria Metrics. Our new architecture has been very stable since it was put into production. With the previous setup we would have had two or three cardinality explosions in a two-week period, with this new one we have none.
See [the full article](https://smarketshq.com/monitoring-kubernetes-clusters-41a4b24c19e3).
## Synthesio
[Synthesio](https://www.synthesio.com/) is the leading social intelligence tool for social media monitoring and analytics.

View file

@ -273,10 +273,12 @@ If old time series are constantly substituted by new time series at a high rate,
* Increased size of inverted index, which is stored at `<-storageDataPath>/indexdb`, since the inverted index contains entries for every label of every time series with at least a single ingested sample
* Slow down of queries over multiple days.
The solution against high churn rate is to identify and eliminate labels with frequently changed values. The [/api/v1/status/tsdb](https://docs.victoriametrics.com/#tsdb-stats) page can help identify these labels.
## What is high cardinality?
High cardinality usually means high number of [active time series](#what-is-active-time-series). High cardinality may lead to high memory usage and/or to high percentage of [slow inserts](#what-is-slow-insert). The source of high cardinality is usually a label with big number of unique values, which presents in big share of the ingested time series. The solution is to identify and remove the source of high cardinality with the help of `/api/v1/status/tsdb` page - see [these docs](https://docs.victoriametrics.com/#tsdb-stats).
High cardinality usually means high number of [active time series](#what-is-active-time-series). High cardinality may lead to high memory usage and/or to high percentage of [slow inserts](#what-is-slow-insert). The source of high cardinality is usually a label with big number of unique values, which presents in big share of the ingested time series. The solution is to identify and remove the source of high cardinality with the help of [/api/v1/status/tsdb](https://docs.victoriametrics.com/#tsdb-stats).
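For example, the stats can be fetched with plain `curl` from a single-node VictoriaMetrics instance; the host, port and `jq` filter below are illustrative:

```bash
# Show the label=value pairs with the highest series counts
curl -s http://localhost:8428/api/v1/status/tsdb | jq '.data.seriesCountByLabelValuePair[:10]'
```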
## What is slow insert?
@ -284,6 +286,11 @@ High cardinality usually means high number of [active time series](#what-is-acti
VictoriaMetrics maintains an in-memory cache for mapping of [active time series](#what-is-active-time-series) into internal series ids. The cache size depends on the available memory for VictoriaMetrics in the host system. If the information about all the active time series doesn't fit the cache, then VictoriaMetrics needs to read and unpack the information from disk on every incoming sample for time series missing in the cache. This operation is much slower than the cache lookup, so such an insert is named a `slow insert`. A high percentage of slow inserts on the [official dashboard for VictoriaMetrics](https://docs.victoriametrics.com/#monitoring) indicates a memory shortage for the current number of [active time series](#what-is-active-time-series). Such a condition usually leads to a significant slowdown of data ingestion and to significantly increased disk IO and CPU usage. The solution is to add more memory or to reduce the number of [active time series](#what-is-active-time-series). The `/api/v1/status/tsdb` page can be helpful for locating the source of the high number of active time series - see [these docs](https://docs.victoriametrics.com/#tsdb-stats).
## Why isn't MetricsQL 100% compatible with PromQL?
[MetricsQL](https://docs.victoriametrics.com/MetricsQL.html) provides a better user experience than PromQL. It fixes a few annoying issues in PromQL. This prevents MetricsQL from being 100% compatible with PromQL. See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details.
## How to migrate data from Prometheus to VictoriaMetrics?
Please see [these docs](https://docs.victoriametrics.com/vmctl.html#migrating-data-from-prometheus).
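A minimal sketch of such a migration with `vmctl`, assuming a Prometheus data snapshot on the local filesystem and a single-node VictoriaMetrics listening on `localhost:8428` (the snapshot path is a placeholder):

```bash
# Take a snapshot via the Prometheus admin API first, then import it:
./vmctl prometheus \
  --prom-snapshot=/path/to/prometheus-data/snapshots/<snapshot-name> \
  --vm-addr=http://localhost:8428
```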

View file

@ -6,6 +6,8 @@ sort: 13
[VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) implements MetricsQL - query language inspired by [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/).
MetricsQL is backwards-compatible with PromQL, so Grafana dashboards backed by Prometheus datasource should work the same after switching from Prometheus to VictoriaMetrics.
However, there are some [intentional differences](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) between these two languages.
[Standalone MetricsQL package](https://godoc.org/github.com/VictoriaMetrics/metricsql) can be used for parsing MetricsQL in external apps.
If you are unfamiliar with PromQL, then it is suggested to read [this tutorial for beginners](https://medium.com/@valyala/promql-tutorial-for-beginners-9ab455142085).
@ -18,6 +20,8 @@ The following functionality is implemented differently in MetricsQL compared to
* MetricsQL removes all the `NaN` values from the output, so some queries like `(-1)^0.5` return empty results in VictoriaMetrics, while returning a series of `NaN` values in Prometheus. Note that Grafana doesn't draw any lines or dots for `NaN` values, so the end result looks the same for both VictoriaMetrics and Prometheus.
* MetricsQL keeps metric names after applying functions that don't change the meaning of the original time series. For example, [min_over_time(foo)](#min_over_time) or [round(foo)](#round) leaves the `foo` metric name in the result. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/674) for details.
Read more about the differences between PromQL and MetricsQL in [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e).
Other PromQL functionality should work the same in MetricsQL. [File an issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues) if you notice discrepancies between PromQL and MetricsQL results other than mentioned above.
## MetricsQL features
@ -349,11 +353,27 @@ See also [implicit query conversions](#implicit-query-conversions).
#### acos
`acos(q)` returns `arccos(v)` for every `v` point of every time series returned by `q`. Metric names are stripped from the resulting series. See also [asin](#asin) and [cos](#cos).
`acos(q)` returns [inverse cosine](https://en.wikipedia.org/wiki/Inverse_trigonometric_functions) for every point of every time series returned by `q`. Metric names are stripped from the resulting series. This function is supported by PromQL. See also [asin](#asin) and [cos](#cos).
#### acosh
`acosh(q)` returns [inverse hyperbolic cosine](https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions#Inverse_hyperbolic_cosine) for every point of every time series returned by `q`. Metric names are stripped from the resulting series. This function is supported by PromQL. See also [cosh](#cosh).
#### asin
`asin(q)` returns `arcsin(v)` for every `v` point of every time series returned by `q`. Metric names are stripped from the resulting series. See also [acos](#acos) and [sin](#sin).
`asin(q)` returns [inverse sine](https://en.wikipedia.org/wiki/Inverse_trigonometric_functions) for every point of every time series returned by `q`. Metric names are stripped from the resulting series. This function is supported by PromQL. See also [acos](#acos) and [sin](#sin).
#### asinh
`asinh(q)` returns [inverse hyperbolic sine](https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions#Inverse_hyperbolic_sine) for every point of every time series returned by `q`. Metric names are stripped from the resulting series. This function is supported by PromQL. See also [sinh](#sinh).
#### atan
`atan(q)` returns [inverse tangent](https://en.wikipedia.org/wiki/Inverse_trigonometric_functions) for every point of every time series returned by `q`. Metric names are stripped from the resulting series. This function is supported by PromQL. See also [tan](#tan).
#### atanh
`atanh(q)` returns [inverse hyperbolic tangent](https://en.wikipedia.org/wiki/Inverse_hyperbolic_functions#Inverse_hyperbolic_tangent) for every point of every time series returned by `q`. Metric names are stripped from the resulting series. This function is supported by PromQL. See also [tanh](#tanh).
#### bitmap_and
@ -389,7 +409,11 @@ See also [implicit query conversions](#implicit-query-conversions).
#### cos
`cos(q)` returns `cos(v)` for every `v` point of every time series returned by `q`. Metric names are stripped from the resulting series. See also [sin](#sin).
`cos(q)` returns `cos(v)` for every `v` point of every time series returned by `q`. Metric names are stripped from the resulting series. This function is supported by PromQL. See also [sin](#sin).
#### cosh
`cosh(q)` returns [hyperbolic cosine](https://en.wikipedia.org/wiki/Hyperbolic_functions) for every point of every time series returned by `q`. Metric names are stripped from the resulting series. This function is supported by PromQL. See also [acosh](#acosh).
#### day_of_month
@ -403,6 +427,10 @@ See also [implicit query conversions](#implicit-query-conversions).
`days_in_month(q)` returns the number of days in the month identified by every point of every time series returned by `q`. It is expected that `q` returns unix timestamps. The returned values are in the range `[28...31]`. Metric names are stripped from the resulting series. This function is supported by PromQL.
#### deg
`deg(q)` converts [Radians to degrees](https://en.wikipedia.org/wiki/Radian#Conversions) for every point of every time series returned by `q`. Metric names are stripped from the resulting series. This function is supported by PromQL. See also [rad](#rad).
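A small worked example (`some_metric` is a placeholder series):

```
deg(pi())               # returns 180 for every point
rad(deg(some_metric))   # round-trips back to the original values of some_metric
```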
#### end
`end()` returns the unix timestamp in seconds for the last point. See also [start](#start). It is known as `end` query arg passed to [/api/v1/query_range](https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries).
@ -477,7 +505,12 @@ See also [implicit query conversions](#implicit-query-conversions).
#### pi
`pi()` returns [Pi number](https://en.wikipedia.org/wiki/Pi).
`pi()` returns [Pi number](https://en.wikipedia.org/wiki/Pi). This function is supported by PromQL.
#### rad
`rad(q)` converts [degrees to Radians](https://en.wikipedia.org/wiki/Radian#Conversions) for every point of every time series returned by `q`. Metric names are stripped from the resulting series. This function is supported by PromQL. See also [deg](#deg).
#### prometheus_buckets
@ -565,7 +598,19 @@ See also [implicit query conversions](#implicit-query-conversions).
#### sin
`sin(q)` returns `sin(v)` for every `v` point of every time series returned by `q`. Metric names are stripped from the resulting series. See also [cos](#cos).
`sin(q)` returns `sin(v)` for every `v` point of every time series returned by `q`. Metric names are stripped from the resulting series. This function is supported by PromQL. See also [cos](#cos).
#### sinh
`sinh(q)` returns [hyperbolic sine](https://en.wikipedia.org/wiki/Hyperbolic_functions) for every point of every time series returned by `q`. Metric names are stripped from the resulting series. This function is supported by PromQL. See also [cosh](#cosh).
#### tan
`tan(q)` returns `tan(v)` for every `v` point of every time series returned by `q`. Metric names are stripped from the resulting series. This function is supported by PromQL. See also [atan](#atan).
#### tanh
`tanh(q)` returns [hyperbolic tangent](https://en.wikipedia.org/wiki/Hyperbolic_functions) for every point of every time series returned by `q`. Metric names are stripped from the resulting series. This function is supported by PromQL. See also [atanh](#atanh).
#### smooth_exponential
@ -613,7 +658,7 @@ See also [implicit query conversions](#implicit-query-conversions).
#### union
`union(q1, ..., qN)` returns a union of time series returned from `q1`, ..., `qN`. The `union` function name can be skipped - the following queries are equivalent: `union(q1, q2)` and `(q1, q2)`.
`union(q1, ..., qN)` returns a union of time series returned from `q1`, ..., `qN`. The `union` function name can be skipped - the following queries are equivalent: `union(q1, q2)` and `(q1, q2)`. It is expected that each `q*` query returns time series with unique sets of labels. Otherwise only the first time series out of series with an identical set of labels is returned. Use the [alias](#alias) and [label_set](#label_set) functions for giving unique labelsets per each `q*` query.
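For example, a sketch using the placeholder queries `q1` and `q2`, which would otherwise produce identical labelsets:

```
union(
  label_set(q1, "source", "q1"),
  label_set(q2, "source", "q2")
)
```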
#### vector
@ -762,7 +807,7 @@ See also [implicit query conversions](#implicit-query-conversions).
#### limitk
`limitk(k, q) by (group_labels)` returns up to `k` time series per each `group_labels` out of time series returned by `q`. The returned set of time series can change with each call.
`limitk(k, q) by (group_labels)` returns up to `k` time series per each `group_labels` out of time series returned by `q`. The returned set of time series remains the same across calls.
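For example, the following query consistently returns up to 3 series per `job` (the metric name is illustrative):

```
limitk(3, process_resident_memory_bytes) by (job)
```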
#### mad

View file

@ -28,6 +28,7 @@ Enterprise binaries can be downloaded and evaluated for free from [the releases
Case studies:
* [AbiosGaming](https://docs.victoriametrics.com/CaseStudies.html#abiosgaming)
* [adidas](https://docs.victoriametrics.com/CaseStudies.html#adidas)
* [Adsterra](https://docs.victoriametrics.com/CaseStudies.html#adsterra)
* [ARNES](https://docs.victoriametrics.com/CaseStudies.html#arnes)
@ -35,12 +36,16 @@ Case studies:
* [CERN](https://docs.victoriametrics.com/CaseStudies.html#cern)
* [COLOPL](https://docs.victoriametrics.com/CaseStudies.html#colopl)
* [Dreamteam](https://docs.victoriametrics.com/CaseStudies.html#dreamteam)
* [Fly.io](https://docs.victoriametrics.com/CaseStudies.html#flyio)
* [German Research Center for Artificial Intelligence](https://docs.victoriametrics.com/CaseStudies.html#german-research-center-for-artificial-intelligence)
* [Grammarly](https://docs.victoriametrics.com/CaseStudies.html#grammarly)
* [Groove X](https://docs.victoriametrics.com/CaseStudies.html#groove-x)
* [Idealo.de](https://docs.victoriametrics.com/CaseStudies.html#idealode)
* [MHI Vestas Offshore Wind](https://docs.victoriametrics.com/CaseStudies.html#mhi-vestas-offshore-wind)
* [Percona](https://docs.victoriametrics.com/CaseStudies.html#percona)
* [Razorpay](https://docs.victoriametrics.com/CaseStudies.html#razorpay)
* [Sensedia](https://docs.victoriametrics.com/CaseStudies.html#sensedia)
* [Smarkets](https://docs.victoriametrics.com/CaseStudies.html#smarkets)
* [Synthesio](https://docs.victoriametrics.com/CaseStudies.html#synthesio)
* [Wedos.com](https://docs.victoriametrics.com/CaseStudies.html#wedoscom)
* [Wix.com](https://docs.victoriametrics.com/CaseStudies.html#wixcom)
@ -604,6 +609,12 @@ The UI allows exploring query results via graphs and tables. Graphs support scro
* Drag the graph to the left / right in order to move the displayed time range into the past / future.
* Hold `Ctrl` (or `Cmd` on MacOS) and scroll up / down in order to zoom in / out the graph.
Query history can be navigated by holding `Ctrl` (or `Cmd` on MacOS) and pressing `up` or `down` arrows on the keyboard while the cursor is located in the query input field.
When querying the [backfilled data](https://docs.victoriametrics.com/#backfilling), it may be useful to disable the response cache by unchecking the `Enable cache` checkbox.
See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).
## How to build from sources
@ -1540,6 +1551,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
The maximum number of CPU cores to use for big merges. Default value is used if set to 0
-csvTrimTimestamp duration
Trim timestamps when importing csv data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
-datadog.maxInsertRequestSize size
The maximum size in bytes of a single DataDog POST request to /api/v1/series
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 67108864)
-dedup.minScrapeInterval duration
Leave only the first sample in every time series per each discrete interval equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication for details
-deleteAuthKey string
@ -1705,8 +1719,11 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-promscrape.maxScrapeSize size
The maximum size of scrape response in bytes to process from Prometheus targets. Bigger responses are rejected
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 16777216)
-promscrape.minResponseSizeForStreamParse size
The minimum target response size for automatic switching to stream parsing mode, which can reduce memory usage. See https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 1000000)
-promscrape.noStaleMarkers
Whether to disable sending Prometheus stale markers for metrics when scrape target disappears. This option may reduce memory usage if stale markers aren't needed for your setup. See also https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode
Whether to disable sending Prometheus stale markers for metrics when scrape target disappears. This option may reduce memory usage if stale markers aren't needed for your setup. This option also disables populating the scrape_series_added metric. See https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series
-promscrape.openstackSDCheckInterval duration
Interval for checking for changes in openstack API server. This works only if openstack_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#openstack_sd_config for details (default 30s)
-promscrape.seriesLimitPerTarget int
@ -1718,7 +1735,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-promscrape.suppressScrapeErrors
Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed
-relabelConfig string
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. See https://docs.victoriametrics.com/#relabeling for details
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
-relabelDebug
Whether to log metrics before and after relabeling with -relabelConfig. If the -relabelDebug is enabled, then the metrics aren't sent to storage. This is useful for debugging the relabeling configs
-retentionPeriod value
@ -1795,6 +1812,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
The maximum number of unique series can be added to the storage during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See also -storage.maxHourlySeries
-storage.maxHourlySeries int
The maximum number of unique series can be added to the storage during the last hour. Excess series are logged and dropped. This can be useful for limiting series cardinality. See also -storage.maxDailySeries
-storage.minFreeDiskSpaceBytes size
The minimum free disk space at -storageDataPath after which the storage stops accepting new data
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 10000000)
-storageDataPath string
Path to storage data (default "victoria-metrics-data")
-tls

View file

@ -32,6 +32,7 @@ Enterprise binaries can be downloaded and evaluated for free from [the releases
Case studies:
* [AbiosGaming](https://docs.victoriametrics.com/CaseStudies.html#abiosgaming)
* [adidas](https://docs.victoriametrics.com/CaseStudies.html#adidas)
* [Adsterra](https://docs.victoriametrics.com/CaseStudies.html#adsterra)
* [ARNES](https://docs.victoriametrics.com/CaseStudies.html#arnes)
@ -39,12 +40,16 @@ Case studies:
* [CERN](https://docs.victoriametrics.com/CaseStudies.html#cern)
* [COLOPL](https://docs.victoriametrics.com/CaseStudies.html#colopl)
* [Dreamteam](https://docs.victoriametrics.com/CaseStudies.html#dreamteam)
* [Fly.io](https://docs.victoriametrics.com/CaseStudies.html#flyio)
* [German Research Center for Artificial Intelligence](https://docs.victoriametrics.com/CaseStudies.html#german-research-center-for-artificial-intelligence)
* [Grammarly](https://docs.victoriametrics.com/CaseStudies.html#grammarly)
* [Groove X](https://docs.victoriametrics.com/CaseStudies.html#groove-x)
* [Idealo.de](https://docs.victoriametrics.com/CaseStudies.html#idealode)
* [MHI Vestas Offshore Wind](https://docs.victoriametrics.com/CaseStudies.html#mhi-vestas-offshore-wind)
* [Percona](https://docs.victoriametrics.com/CaseStudies.html#percona)
* [Razorpay](https://docs.victoriametrics.com/CaseStudies.html#razorpay)
* [Sensedia](https://docs.victoriametrics.com/CaseStudies.html#sensedia)
* [Smarkets](https://docs.victoriametrics.com/CaseStudies.html#smarkets)
* [Synthesio](https://docs.victoriametrics.com/CaseStudies.html#synthesio)
* [Wedos.com](https://docs.victoriametrics.com/CaseStudies.html#wedoscom)
* [Wix.com](https://docs.victoriametrics.com/CaseStudies.html#wixcom)
@ -608,6 +613,12 @@ The UI allows exploring query results via graphs and tables. Graphs support scro
* Drag the graph to the left / right in order to move the displayed time range into the past / future.
* Hold `Ctrl` (or `Cmd` on MacOS) and scroll up / down in order to zoom in / out the graph.
Query history can be navigated by holding `Ctrl` (or `Cmd` on MacOS) and pressing `up` or `down` arrows on the keyboard while the cursor is located in the query input field.
When querying the [backfilled data](https://docs.victoriametrics.com/#backfilling), it may be useful to disable the response cache by unchecking the `Enable cache` checkbox.
See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).
## How to build from sources
@ -1544,6 +1555,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
The maximum number of CPU cores to use for big merges. Default value is used if set to 0
-csvTrimTimestamp duration
Trim timestamps when importing csv data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
-datadog.maxInsertRequestSize size
The maximum size in bytes of a single DataDog POST request to /api/v1/series
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 67108864)
-dedup.minScrapeInterval duration
Leave only the first sample in every time series per each discrete interval equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication for details
-deleteAuthKey string
@ -1709,8 +1723,11 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-promscrape.maxScrapeSize size
The maximum size of scrape response in bytes to process from Prometheus targets. Bigger responses are rejected
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 16777216)
-promscrape.minResponseSizeForStreamParse size
The minimum target response size for automatic switching to stream parsing mode, which can reduce memory usage. See https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 1000000)
-promscrape.noStaleMarkers
Whether to disable sending Prometheus stale markers for metrics when scrape target disappears. This option may reduce memory usage if stale markers aren't needed for your setup. See also https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode
Whether to disable sending Prometheus stale markers for metrics when scrape target disappears. This option may reduce memory usage if stale markers aren't needed for your setup. This option also disables populating the scrape_series_added metric. See https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series
-promscrape.openstackSDCheckInterval duration
Interval for checking for changes in openstack API server. This works only if openstack_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#openstack_sd_config for details (default 30s)
-promscrape.seriesLimitPerTarget int
@ -1722,7 +1739,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-promscrape.suppressScrapeErrors
Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed
-relabelConfig string
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. See https://docs.victoriametrics.com/#relabeling for details
Optional path to a file with relabeling rules, which are applied to all the ingested metrics. See https://docs.victoriametrics.com/#relabeling for details. The config is reloaded on SIGHUP signal
-relabelDebug
Whether to log metrics before and after relabeling with -relabelConfig. If the -relabelDebug is enabled, then the metrics aren't sent to storage. This is useful for debugging the relabeling configs
-retentionPeriod value
@ -1799,6 +1816,9 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
The maximum number of unique series can be added to the storage during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See also -storage.maxHourlySeries
-storage.maxHourlySeries int
The maximum number of unique series can be added to the storage during the last hour. Excess series are logged and dropped. This can be useful for limiting series cardinality. See also -storage.maxDailySeries
-storage.minFreeDiskSpaceBytes size
The minimum free disk space at -storageDataPath after which the storage stops accepting new data
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 10000000)
-storageDataPath string
Path to storage data (default "victoria-metrics-data")
-tls

View file

@ -6,4 +6,5 @@ sort: 21
1. [K8s monitoring via VM Single](k8s-monitoring-via-vm-single.html)
2. [K8s monitoring via VM Cluster](k8s-monitoring-via-vm-cluster.html)
3. [HA monitoring setup in K8s via VM Cluster](k8s-ha-monitoring-via-vm-cluster.html)
4. [Getting started with VM Operator](getting-started-with-vm-operator.html)

View file

@ -0,0 +1,317 @@
# Getting started with VM Operator
**The guide covers:**
* The setup of a [VM Operator](https://github.com/VictoriaMetrics/helm-charts/tree/master/charts/victoria-metrics-operator) in [Kubernetes](https://kubernetes.io/) via Helm charts.
* The setup of a [VictoriaMetrics Cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html) via [VM Operator](https://github.com/VictoriaMetrics/helm-charts/tree/master/charts/victoria-metrics-operator).
* How to add CRD for a [VictoriaMetrics Cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html) via [VM Operator](https://github.com/VictoriaMetrics/helm-charts/tree/master/charts/victoria-metrics-operator).
* How to visualize stored data.
* How to store metrics in [VictoriaMetrics](https://victoriametrics.com).
**Preconditions**
* [Kubernetes cluster 1.20.9-gke.1001](https://cloud.google.com/kubernetes-engine). We use a GKE cluster from [GCP](https://cloud.google.com/), but this guide also applies to any Kubernetes cluster, for example [Amazon EKS](https://aws.amazon.com/ru/eks/).
* [Helm 3](https://helm.sh/docs/intro/install).
* [kubectl 1.21+](https://kubernetes.io/docs/tasks/tools/install-kubectl).
## 1. VictoriaMetrics Helm repository
See how to work with a [VictoriaMetrics Helm repository in previous guide](https://docs.victoriametrics.com/guides/k8s-monitoring-via-vm-cluster.html#1-victoriametrics-helm-repository).
## 2. Install the VM Operator from the Helm chart
<div class="with-copy" markdown="1">
```bash
helm install vmoperator vm/victoria-metrics-operator
```
</div>
The expected output is:
```bash
NAME: vmoperator
LAST DEPLOYED: Thu Sep 30 17:30:30 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
victoria-metrics-operator has been installed. Check its status by running:
kubectl --namespace default get pods -l "app.kubernetes.io/instance=vmoperator"
Get more information on https://github.com/VictoriaMetrics/helm-charts/tree/master/charts/victoria-metrics-operator.
See "Getting started guide for VM Operator" on https://docs.victoriametrics.com/guides/getting-started-with-vm-operator.html.
```
Run the following command to check that VM Operator is up and running:
<div class="with-copy" markdown="1">
```bash
kubectl --namespace default get pods -l "app.kubernetes.io/instance=vmoperator"
```
</div>
The expected output:
```bash
NAME READY STATUS RESTARTS AGE
vmoperator-victoria-metrics-operator-67cff44cd6-s47n6 1/1 Running 0 77s
```
## 3. Install VictoriaMetrics Cluster
> For this example we will use the default value for `name: example-vmcluster-persistent`. Change its value to suit your needs.
Run the following command to install [VictoriaMetrics Cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html) via [VM Operator](https://github.com/VictoriaMetrics/helm-charts/tree/master/charts/victoria-metrics-operator):
<div class="with-copy" markdown="1" id="example-cluster-config">
```bash
cat << EOF | kubectl apply -f -
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMCluster
metadata:
name: example-vmcluster-persistent
spec:
# Add fields here
retentionPeriod: "12"
vmstorage:
replicaCount: 2
vmselect:
replicaCount: 2
vminsert:
replicaCount: 2
EOF
```
</div>
The expected output:
```bash
vmcluster.operator.victoriametrics.com/example-vmcluster-persistent created
```
* By applying this CRD we install the [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html) to the default [namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) of your k8s cluster with the following params:
* `retentionPeriod: "12"` sets the [retention](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#retention) to 12 months.
* `replicaCount: 2` creates two replicas of vmselect, vminsert and vmstorage.
Please note that it may take some time for the pods to start. To check that the pods are started, run the following command:
<div class="with-copy" markdown="1" id="example-cluster-config">
```bash
kubectl get pods | grep vmcluster
```
</div>
The expected output:
```bash
NAME READY STATUS RESTARTS AGE
vminsert-example-vmcluster-persistent-845849cb84-9vb6f 1/1 Running 0 5m15s
vminsert-example-vmcluster-persistent-845849cb84-r7mmk 1/1 Running 0 5m15s
vmselect-example-vmcluster-persistent-0 1/1 Running 0 5m21s
vmselect-example-vmcluster-persistent-1 1/1 Running 0 5m21s
vmstorage-example-vmcluster-persistent-0 1/1 Running 0 5m25s
vmstorage-example-vmcluster-persistent-1 1/1 Running 0 5m25s
```
There is an extra command to get information about the cluster state:
<div class="with-copy" markdown="1" id="services">
```bash
kubectl get vmclusters
```
</div>
The expected output:
```bash
NAME INSERT COUNT STORAGE COUNT SELECT COUNT AGE STATUS
example-vmcluster-persistent 2 2 2 5m53s operational
```
Internet traffic goes through the Kubernetes load balancer, which uses the set of Pods targeted by a [Kubernetes Service](https://kubernetes.io/docs/concepts/services-networking/service/). The service in the [VictoriaMetrics Cluster architecture](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#architecture-overview) which accepts the ingested data is named `vminsert`, and in Kubernetes it is the `vminsert` service. So we need to use it for the remote write URL.
To get the name of the `vminsert` service, please run the following command:
<div class="with-copy" markdown="1" id="services">
```bash
kubectl get svc | grep vminsert
```
</div>
The expected output:
```bash
vminsert-example-vmcluster-persistent ClusterIP 10.107.47.136 <none> 8480/TCP 5m58s
```
To scrape metrics from Kubernetes with a VictoriaMetrics Cluster we will need to install [VMAgent](https://docs.victoriametrics.com/vmagent.html) with some additional configurations.
Copy the `vminsert-example-vmcluster-persistent` service name (or whatever the user put into the `metadata.name` field of the [example cluster config](https://docs.victoriametrics.com/getting-started-with-vm-operator.html#example-cluster-config)) and add it to the `remoteWrite` URL from the [quick-start example](https://github.com/VictoriaMetrics/operator/blob/master/docs/quick-start.MD#vmagent).
Here is an example of the full configuration that we need to apply:
<div class="with-copy" markdown="1">
```bash
cat <<EOF | kubectl apply -f -
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMAgent
metadata:
name: example-vmagent
spec:
serviceScrapeNamespaceSelector: {}
podScrapeNamespaceSelector: {}
podScrapeSelector: {}
serviceScrapeSelector: {}
nodeScrapeSelector: {}
nodeScrapeNamespaceSelector: {}
staticScrapeSelector: {}
staticScrapeNamespaceSelector: {}
replicaCount: 1
remoteWrite:
- url: "http://vminsert-example-vmcluster-persistent.default.svc.cluster.local:8480/insert/0/prometheus/api/v1/write"
EOF
```
</div>
The expected output:
```bash
vmagent.operator.victoriametrics.com/example-vmagent created
```
> `remoteWrite.url` for VMAgent consists of the following parts:
> `service_name.VMCluster_namespace.svc.kubernetes_cluster_domain`, which in our case looks like `vminsert-example-vmcluster-persistent.default.svc.cluster.local`
Verify that `VMAgent` is up and running by executing the following command:
<div class="with-copy" markdown="1">
```bash
kubectl get pods | grep vmagent
```
</div>
The expected output is:
```bash
vmagent-example-vmagent-7996844b5f-b5rzs 2/2 Running 0 9s
```
> There are two containers for VMAgent: the first one is VMAgent itself and the second one is a sidecar that handles its configuration secret. VMAgent uses a secret with the configuration, which is mounted into the special sidecar. The sidecar observes changes to the configuration and sends a signal to VMAgent to reload it.
Run the following command to make `VMAgent`'s port accessible from the local machine:
<div class="with-copy" markdown="1">

```bash
kubectl port-forward svc/vmagent-example-vmagent 8429:8429
```

</div>
The expected output is:
```bash
Forwarding from 127.0.0.1:8429 -> 8429
Forwarding from [::1]:8429 -> 8429
```
To check that `VMAgent` collects metrics from the k8s cluster, open [http://127.0.0.1:8429/targets](http://127.0.0.1:8429/targets) in your browser.
You will see something like this:
<p align="center">
<img src="guide-vmcluster-k8s-via-vm-operator.png" width="800" alt="">
</p>
`VMAgent` connects to [Kubernetes service discovery](https://kubernetes.io/docs/concepts/services-networking/service/) and gets the targets which need to be scraped. This service discovery is controlled by the [VictoriaMetrics Operator](https://github.com/VictoriaMetrics/operator).
## 4. Verifying VictoriaMetrics cluster
See [how to install and connect Grafana to VictoriaMetrics](https://docs.victoriametrics.com/guides/k8s-monitoring-via-vm-cluster.html#4-install-and-connect-grafana-to-victoriametrics-with-helm), but with one addition - we should get the name of the `vmselect` service from the freshly installed VictoriaMetrics Cluster, because it will now be different.
To get the new service name, please run the following command:
<div class="with-copy" markdown="1" id="services">
```bash
kubectl get svc | grep vmselect
```
</div>
The expected output:
```bash
vmselect-example-vmcluster-persistent ClusterIP None <none> 8481/TCP 7m
```
The final config will look like this:
<div class="with-copy" markdown="1">
```yaml
cat <<EOF | helm install my-grafana grafana/grafana -f -
datasources:
datasources.yaml:
apiVersion: 1
datasources:
- name: victoriametrics
type: prometheus
orgId: 1
url: http://vmselect-example-vmcluster-persistent.default.svc.cluster.local:8481/select/0/prometheus/
access: proxy
isDefault: true
updateIntervalSeconds: 10
editable: true
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: true
editable: true
options:
path: /var/lib/grafana/dashboards/default
dashboards:
default:
victoriametrics:
gnetId: 11176
revision: 16
datasource: victoriametrics
vmagent:
gnetId: 12683
revision: 6
datasource: victoriametrics
kubernetes:
gnetId: 14205
revision: 1
datasource: victoriametrics
EOF
```
</div>
## 5. Check the result you obtained in your browser
To check that [VictoriaMetrics](https://victoriametrics.com) is collecting metrics from the k8s cluster, open [http://127.0.0.1:3000/dashboards](http://127.0.0.1:3000/dashboards) in your browser and choose the `VictoriaMetrics - cluster` dashboard. Use `admin` as the login and the `password` that you previously got from kubectl.
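If port `3000` isn't forwarded yet, the following sketch (assuming the `my-grafana` release name from the config above and the standard Grafana Helm chart defaults) retrieves the password and exposes the UI locally:

```bash
# Get the auto-generated Grafana admin password
kubectl get secret my-grafana -o jsonpath="{.data.admin-password}" | base64 --decode; echo

# Forward the Grafana service to http://127.0.0.1:3000
kubectl port-forward svc/my-grafana 3000:80
```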
<p align="center">
<img src="guide-vmcluster-k8s-via-vm-operator-grafana1.png" width="800" alt="grafana dashboards">
</p>
The expected output is:
<p align="center">
<img src="guide-vmcluster-k8s-via-vm-operator-grafana2.png" width="800" alt="grafana dashboards">
</p>
## 6. Summary
* We set up the Kubernetes Operator for VictoriaMetrics using CRDs.
* We collected metrics from all running services and stored them in the VictoriaMetrics database.


View file

@ -306,12 +306,14 @@ You can read more about relabeling in the following articles:
* If the scrape target is removed from the list of targets, then stale markers are sent for all the metrics scraped from this target.
* Stale markers are sent for all the scraped metrics on graceful shutdown of `vmagent`.
Prometheus staleness markers aren't sent to `-remoteWrite.url` in [stream parsing mode](#stream-parsing-mode) or if the `-promscrape.noStaleMarkers` command-line flag is set.
Prometheus staleness markers' tracking needs additional memory, since it must store the previous response body per each scrape target in order to compare it to the current response body. The memory usage may be reduced by passing `-promscrape.noStaleMarkers` command-line flag to `vmagent`. This disables staleness tracking. This also disables tracking the number of new time series per each scrape with the auto-generated `scrape_series_added` metric. See [these docs](https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series) for details.
## Stream parsing mode
By default `vmagent` reads the full response from the scrape target into memory, then parses it, applies [relabeling](#relabeling) and then pushes the resulting metrics to the configured `-remoteWrite.url`. This mode works well for the majority of cases when the scrape target exposes a small number of metrics (e.g. less than 10 thousand). But this mode may use big amounts of memory when the scrape target exposes a big number of metrics. In this case it is recommended to enable stream parsing mode. When this mode is enabled, `vmagent` reads the response from the scrape target in chunks, then immediately processes every chunk and pushes the processed metrics to remote storage. This allows saving memory when scraping targets that expose millions of metrics. Stream parsing mode may be enabled in the following places:
By default `vmagent` reads the full response body from the scrape target into memory, then parses it, applies [relabeling](#relabeling) and then pushes the resulting metrics to the configured `-remoteWrite.url`. This mode works well for the majority of cases when the scrape target exposes a small number of metrics (e.g. less than 10 thousand). But this mode may use big amounts of memory when the scrape target exposes a big number of metrics. In this case it is recommended to enable stream parsing mode. When this mode is enabled, `vmagent` reads the response from the scrape target in chunks, then immediately processes every chunk and pushes the processed metrics to remote storage. This allows saving memory when scraping targets that expose millions of metrics.
Stream parsing mode is automatically enabled for scrape targets returning response bodies with sizes bigger than the `-promscrape.minResponseSizeForStreamParse` command-line flag value. Additionally, the stream parsing mode can be explicitly enabled in the following places:
- Via `-promscrape.streamParse` command-line flag. In this case all the scrape targets defined in the file pointed by `-promscrape.config` are scraped in stream parsing mode.
- Via `stream_parse: true` option at `scrape_configs` section. In this case all the scrape targets defined in this section are scraped in stream parsing mode.
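For example, a minimal scrape config sketch (the job name and target address are placeholders) that enables stream parsing mode for a single job:

```yaml
scrape_configs:
- job_name: heavy-exporter
  # Parse the response in chunks instead of buffering it fully in memory
  stream_parse: true
  static_configs:
  - targets: ["host123:8080"]
```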
@ -333,7 +335,7 @@ scrape_configs:
'match[]': ['{__name__!=""}']
```
Note that `sample_limit` option doesn't prevent from data push to remote storage if stream parsing is enabled because the parsed data is pushed to remote storage as soon as it is parsed.
Note that `sample_limit` and `series_limit` options cannot be used in stream parsing mode because the parsed data is pushed to remote storage as soon as it is parsed.
## Scraping big number of targets
@ -453,7 +455,8 @@ It may be useful to perform `vmagent` rolling update without any scrape loss.
as `vmagent` establishes at least a single TCP connection per target.
* If `vmagent` uses too big amounts of memory, then the following options can help:
* Enabling stream parsing. See [these docs](#stream-parsing-mode).
* Disabling staleness tracking with `-promscrape.noStaleMarkers` option. See [these docs](#prometheus-staleness-markers).
* Enabling stream parsing mode. See [these docs](#stream-parsing-mode).
* Reducing the number of output queues with `-remoteWrite.queues` command-line option.
* Reducing the amounts of RAM vmagent can use for in-memory buffering with `-memory.allowedPercent` or `-memory.allowedBytes` command-line option. Another option is to reduce memory limits in Docker and/or Kubernetes if `vmagent` runs under these systems.
* Reducing the number of CPU cores vmagent can use by passing `GOMAXPROCS=N` environment variable to `vmagent`, where `N` is the desired limit on CPU cores. Another option is to reduce CPU limits in Docker or Kubernetes if `vmagent` runs under these systems.
@ -710,6 +713,9 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
-csvTrimTimestamp duration
Trim timestamps when importing csv data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
-datadog.maxInsertRequestSize size
The maximum size in bytes of a single DataDog POST request to /api/v1/series
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 67108864)
-dryRun
Whether to check only config files without running vmagent. The following files are checked: -promscrape.config, -remoteWrite.relabelConfig, -remoteWrite.urlRelabelConfig . Unknown config entries are allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse
-enableTCP6
@ -857,8 +863,11 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
-promscrape.maxScrapeSize size
The maximum size of scrape response in bytes to process from Prometheus targets. Bigger responses are rejected
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 16777216)
-promscrape.minResponseSizeForStreamParse size
The minimum target response size for automatic switching to stream parsing mode, which can reduce memory usage. See https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 1000000)
-promscrape.noStaleMarkers
Whether to disable sending Prometheus stale markers for metrics when scrape target disappears. This option may reduce memory usage if stale markers aren't needed for your setup. See also https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode
Whether to disable sending Prometheus stale markers for metrics when scrape target disappears. This option may reduce memory usage if stale markers aren't needed for your setup. This option also disables populating the scrape_series_added metric. See https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series
-promscrape.openstackSDCheckInterval duration
Interval for checking for changes in openstack API server. This works only if openstack_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#openstack_sd_config for details (default 30s)
-promscrape.seriesLimitPerTarget int

View file

@ -355,12 +355,12 @@ See full description for these flags in `./vmalert --help`.
## Monitoring
`vmalert` exports various metrics in Prometheus exposition format at `http://vmalert-host:8880/metrics` page.
We recommend setting up regular scraping of this page either through `vmagent` or by Prometheus so that the exported
metrics may be analyzed later.

Use the official [Grafana dashboard](https://grafana.com/grafana/dashboards/14950) for `vmalert` overview.
If you have suggestions for improvements or have found a bug - please open an issue on GitHub or add
a review to the dashboard.
@ -500,6 +500,8 @@ The shortlist of configuration flags is the following:
Optional bearer auth token to use for -remoteRead.url.
-remoteRead.bearerTokenFile string
Optional path to bearer token file to use for -remoteRead.url.
-remoteRead.disablePathAppend
Whether to disable automatic appending of '/api/v1/query' path to the configured -remoteRead.url.
-remoteRead.ignoreRestoreErrors
Whether to ignore errors from remote storage when restoring alerts state on startup. (default true)
-remoteRead.lookback duration
@ -515,7 +517,7 @@ The shortlist of configuration flags is the following:
-remoteRead.tlsServerName string
Optional TLS server name to use for connections to -remoteRead.url. By default the server name from -remoteRead.url is used
-remoteRead.url vmalert
Optional URL to VictoriaMetrics or vmselect that will be used to restore alerts state. This configuration makes sense only if vmalert was configured with `remoteWrite.url` before and has successfully persisted its state. E.g. http://127.0.0.1:8428
Optional URL to VictoriaMetrics or vmselect that will be used to restore alerts state. This configuration makes sense only if vmalert was configured with `remoteWrite.url` before and has successfully persisted its state. E.g. http://127.0.0.1:8428. See also -remoteRead.disablePathAppend
-remoteWrite.basicAuth.password string
Optional basic auth password for -remoteWrite.url
-remoteWrite.basicAuth.passwordFile string

View file

@ -41,9 +41,8 @@ Each `url_prefix` in the [-auth.config](#auth-config) may contain either a singl
`-auth.config` is represented in the following simple `yml` format:
```yml
# Arbitrary number of usernames may be put here.
# Usernames must be unique.
# Username and bearer_token values must be unique.
users:
# Requests with the 'Authorization: Bearer XXXX' header are proxied to http://localhost:8428 .
@ -51,6 +50,14 @@ users:
- bearer_token: "XXXX"
url_prefix: "http://localhost:8428"
# Requests with the 'Authorization: Bearer YYY' header are proxied to http://localhost:8428 .
# The `X-Scope-OrgID: foobar` http header is added to every proxied request.
# For example, http://vmauth:8427/api/v1/query is proxied to http://localhost:8428/api/v1/query
- bearer_token: "YYY"
url_prefix: "http://localhost:8428"
headers:
- "X-Scope-OrgID: foobar"
# The user for querying local single-node VictoriaMetrics.
# All the requests to http://vmauth:8427 with the given Basic Auth (username:password)
# will be proxied to http://localhost:8428 .
@ -93,7 +100,6 @@ users:
- "http://vminsert1:8480/insert/42/prometheus"
- "http://vminsert2:8480/insert/42/prometheus"
# A single user for querying and inserting data:
# - Requests to http://vmauth:8427/api/v1/query, http://vmauth:8427/api/v1/query_range
# and http://vmauth:8427/api/v1/label/<label_name>/values are proxied to the following urls in a round-robin manner:
@ -101,7 +107,8 @@ users:
# - http://vmselect2:8481/select/42/prometheus
# For example, http://vmauth:8427/api/v1/query is proxied to http://vmselect1:8480/select/42/prometheus/api/v1/query
# or to http://vmselect2:8480/select/42/prometheus/api/v1/query .
# - Requests to http://vmauth:8427/api/v1/write are proxied to http://vminsert:8480/insert/42/prometheus/api/v1/write .
# The "X-Scope-OrgID: abc" http header is added to these requests.
- username: "foobar"
url_map:
- src_paths:
@ -113,7 +120,8 @@ users:
- "http://vmselect2:8481/select/42/prometheus"
- src_paths: ["/api/v1/write"]
url_prefix: "http://vminsert:8480/insert/42/prometheus"
headers:
- "X-Scope-OrgID: abc"
```
The config may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.
This may be useful for passing secrets to the config.

go.mod
View file

@ -1,17 +1,16 @@
module github.com/VictoriaMetrics/VictoriaMetrics
require (
cloud.google.com/go v0.97.0 // indirect
cloud.google.com/go/storage v1.17.0
cloud.google.com/go/storage v1.18.2
github.com/VictoriaMetrics/fastcache v1.7.0
// Do not use the original github.com/valyala/fasthttp because of issues
// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
github.com/VictoriaMetrics/fasthttp v1.1.0
github.com/VictoriaMetrics/metrics v1.18.0
github.com/VictoriaMetrics/metricsql v0.26.0
github.com/VictoriaMetrics/metricsql v0.27.0
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/aws/aws-sdk-go v1.40.58
github.com/aws/aws-sdk-go v1.41.8
github.com/cespare/xxhash/v2 v2.1.2
github.com/cheggaaa/pb/v3 v3.0.8
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
@ -23,24 +22,21 @@ require (
github.com/mattn/go-colorable v0.1.11 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/oklog/ulid v1.3.1
github.com/prometheus/common v0.31.1 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
github.com/urfave/cli/v2 v2.3.0
github.com/valyala/fastjson v1.6.3
github.com/valyala/fastrand v1.1.0
github.com/valyala/fasttemplate v1.2.1
github.com/valyala/gozstd v1.13.0
github.com/valyala/gozstd v1.14.1
github.com/valyala/quicktemplate v1.7.0
golang.org/x/net v0.0.0-20211007125505-59d4e928ea9d
golang.org/x/net v0.0.0-20211020060615-d418f374d309
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac
google.golang.org/api v0.58.0
google.golang.org/genproto v0.0.0-20211007155348-82e027067bd4 // indirect
golang.org/x/sys v0.0.0-20211020174200-9d6173849985
google.golang.org/api v0.59.0
google.golang.org/genproto v0.0.0-20211021150943-2b146023228c // indirect
google.golang.org/grpc v1.41.0 // indirect
gopkg.in/yaml.v2 v2.4.0
)
// This is needed until https://github.com/googleapis/google-cloud-go/issues/4783 is resolved
replace cloud.google.com/go v0.94.1 => cloud.google.com/go v0.93.3
go 1.16

go.sum
View file

@ -25,6 +25,7 @@ cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSU
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8=
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
@ -46,8 +47,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.17.0 h1:CDpe3jS3EiD5nGlbtvyA4EUfkF6k9GMrxLR8+hLmoec=
cloud.google.com/go/storage v1.17.0/go.mod h1:0wRtHSM3Npk/QJYdwcpRNVRVJlH2OxyWF9Dws3J+MtE=
cloud.google.com/go/storage v1.18.2 h1:5NQw6tOn3eMm0oE8vTkfjau18kjL79FlMjy/CHTpmoY=
cloud.google.com/go/storage v1.18.2/go.mod h1:AiIj7BWXyhO5gGVmYJ+S8tbkCx3yb0IMjua8Aw4naVM=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
@ -107,8 +108,8 @@ github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR
github.com/VictoriaMetrics/metrics v1.12.2/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
github.com/VictoriaMetrics/metrics v1.18.0 h1:vov5NxDHRSXFbdiH4dYLYEjKLoAXXSQ7hcnG8TSD9JQ=
github.com/VictoriaMetrics/metrics v1.18.0/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
github.com/VictoriaMetrics/metricsql v0.26.0 h1:lJBRn9vn9kst7hfNzSsQorulzNYQtX7JxWWWxh/udfI=
github.com/VictoriaMetrics/metricsql v0.26.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
github.com/VictoriaMetrics/metricsql v0.27.0 h1:S6xWFKEyu+EbPS3tYr1cWeRza61L3e4tYcbBqMakuX0=
github.com/VictoriaMetrics/metricsql v0.27.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
@ -153,8 +154,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go v1.40.58 h1:SFa94nBsXyaS+cXluXlvqLwsQdeD7A/unJcWEld1xZ0=
github.com/aws/aws-sdk-go v1.40.58/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go v1.41.8 h1:j6imzwVyWQYuQxbkPmg2MdMmLB+Zw+U3Ewi59YF8Rwk=
github.com/aws/aws-sdk-go v1.41.8/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o=
@ -847,8 +848,8 @@ github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16
github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.31.1 h1:d18hG4PkHnNAKNMOmFuXFaiY8Us0nird/2m60uS1AMs=
github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@ -958,8 +959,8 @@ github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/gozstd v1.13.0 h1:M9qgbElBZsHlh8a4jjHO4lY42xLJeb+KWVBwFBAapRo=
github.com/valyala/gozstd v1.13.0/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
github.com/valyala/gozstd v1.14.1 h1:xkPAeHe8U/w/ocS6PywjkH406lKdratZuxhb1UTgO/s=
github.com/valyala/gozstd v1.14.1/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
github.com/valyala/histogram v1.1.2/go.mod h1:CZAr6gK9dbD7hYx2s8WSPh0p5x5wETjC+2b3PJVtEdg=
github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ=
github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY=
@ -1155,8 +1156,8 @@ golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211007125505-59d4e928ea9d h1:QWMn1lFvU/nZ58ssWqiFJMd3DKIII8NYc4sn708XgKs=
golang.org/x/net v0.0.0-20211007125505-59d4e928ea9d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211020060615-d418f374d309 h1:A0lJIi+hcTR6aajJH4YqKWwohY4aW9RO7oRMcdv+HKI=
golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1280,8 +1281,9 @@ golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211020174200-9d6173849985 h1:LOlKVhfDyahgmqa97awczplwkjzNaELFg3zRIJ13RYo=
golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1425,10 +1427,12 @@ google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtuk
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
google.golang.org/api v0.58.0 h1:MDkAbYIB1JpSgCTOCYYoIec/coMlKK4oVbpnBLLcyT0=
google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E=
google.golang.org/api v0.59.0 h1:fPfFO7gttlXYo2ALuD3HxJzh8vaF++4youI0BkFL6GE=
google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1495,13 +1499,15 @@ google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKr
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211007155348-82e027067bd4 h1:YXPV/eKW0ZWRdB5tyI6aPoaa2Wxb4OSlFrTREMdwn64=
google.golang.org/genproto v0.0.0-20211007155348-82e027067bd4/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211016002631-37fc39342514/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211021150943-2b146023228c h1:FqrtZMB5Wr+/RecOM3uPJNPfWR8Upb5hAPnt7PU6i4k=
google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=

View file

@ -40,7 +40,7 @@ func Parse() {
// Get flag value from environment var.
fname := getEnvFlagName(f.Name)
if v, ok := os.LookupEnv(fname); ok {
if err := f.Value.Set(v); err != nil {
if err := flag.Set(f.Name, v); err != nil {
// Do not use lib/logger here, since it is uninitialized yet.
log.Fatalf("cannot set flag %s to %q, which is read from environment variable %q: %s", f.Name, v, fname, err)
}
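The switch from `f.Value.Set(v)` to `flag.Set(f.Name, v)` matters for the `/flags` page added in this commit: `flag.Set` records the flag in the standard library's set of explicitly set flags, which is what `flag.Visit` (used by `flagutil.WriteFlags` below) iterates over, so flags provided via environment variables get reported too. A minimal stdlib-only sketch of the difference:

```go
package main

import (
	"flag"
	"fmt"
)

func main() {
	foo := flag.String("foo", "", "example flag")
	flag.Parse()

	// Setting through the flag.Value interface updates the value only.
	_ = flag.Lookup("foo").Value.Set("via-Value.Set")
	n := 0
	flag.Visit(func(*flag.Flag) { n++ })
	fmt.Println(*foo, n) // "via-Value.Set 0" - flag.Visit does not see the flag as set

	// flag.Set updates the value AND marks the flag as explicitly set.
	_ = flag.Set("foo", "via-flag.Set")
	flag.Visit(func(*flag.Flag) { n++ })
	fmt.Println(*foo, n) // "via-flag.Set 1"
}
```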

lib/flagutil/flag.go Normal file
View file

@ -0,0 +1,20 @@
package flagutil
import (
"flag"
"fmt"
"io"
"strings"
)
// WriteFlags writes all the explicitly set flags to w.
func WriteFlags(w io.Writer) {
flag.Visit(func(f *flag.Flag) {
lname := strings.ToLower(f.Name)
value := f.Value.String()
if IsSecretFlag(lname) {
value = "secret"
}
fmt.Fprintf(w, "-%s=%q\n", f.Name, value)
})
}
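A minimal usage sketch (the `-httpListenAddr` flag is just an example; `IsSecretFlag` is the existing helper in the same package):

```go
package main

import (
	"flag"
	"os"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
)

var httpListenAddr = flag.String("httpListenAddr", ":8428", "TCP address for HTTP connections")

func main() {
	flag.Parse()
	// Prints one `-name="value"` line per explicitly set flag;
	// values of flags registered as secret are masked with the word "secret".
	flagutil.WriteFlags(os.Stdout)
}
```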

View file

@ -22,6 +22,7 @@ import (
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/metrics"
@ -279,6 +280,10 @@ func handlerWrapper(s *server, w http.ResponseWriter, r *http.Request, rh Reques
WritePrometheusMetrics(w)
metricsHandlerDuration.UpdateDuration(startTime)
return
case "/flags":
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
flagutil.WriteFlags(w)
return
default:
if strings.HasPrefix(r.URL.Path, "/debug/pprof/") {
pprofRequests.Inc()

View file

@ -106,37 +106,35 @@ func newClient(sw *ScrapeWork) *client {
MaxIdempotentRequestAttempts: 1,
}
var sc *http.Client
	var proxyURLFunc func(*http.Request) (*url.URL, error)
	if proxyURL := sw.ProxyURL.URL(); proxyURL != nil {
		proxyURLFunc = http.ProxyURL(proxyURL)
	}
	sc = &http.Client{
		Transport: &http.Transport{
			TLSClientConfig:     tlsCfg,
			Proxy:               proxyURLFunc,
			TLSHandshakeTimeout: 10 * time.Second,
			IdleConnTimeout:     2 * sw.ScrapeInterval,
			DisableCompression:  *disableCompression || sw.DisableCompression,
			DisableKeepAlives:   *disableKeepAlive || sw.DisableKeepAlive,
			DialContext:         statStdDial,
			MaxIdleConnsPerHost: 100,

			// Set timeout for receiving the first response byte,
			// since the duration for reading the full response can be much bigger because of stream parsing.
			// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1017#issuecomment-767235047
			ResponseHeaderTimeout: sw.ScrapeTimeout,
		},
		// Set 30x bigger timeout than the sw.ScrapeTimeout, since the duration for reading the full response
		// can be much bigger because of stream parsing.
		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1017#issuecomment-767235047
		Timeout: 30 * sw.ScrapeTimeout,
	}
	if sw.DenyRedirects {
		sc.CheckRedirect = func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		}
	}
return &client{

View file

@ -58,8 +58,8 @@ var (
// Config represents essential parts from Prometheus config defined at https://prometheus.io/docs/prometheus/latest/configuration/configuration/
type Config struct {
Global GlobalConfig `yaml:"global,omitempty"`
ScrapeConfigs []ScrapeConfig `yaml:"scrape_configs"`
ScrapeConfigFiles []string `yaml:"scrape_config_files"`
ScrapeConfigs []ScrapeConfig `yaml:"scrape_configs,omitempty"`
ScrapeConfigFiles []string `yaml:"scrape_config_files,omitempty"`
// This is set to the directory from where the config has been loaded.
baseDir string
@ -120,8 +120,8 @@ type ScrapeConfig struct {
ScrapeTimeout time.Duration `yaml:"scrape_timeout,omitempty"`
MetricsPath string `yaml:"metrics_path,omitempty"`
HonorLabels bool `yaml:"honor_labels,omitempty"`
HonorTimestamps bool `yaml:"honor_timestamps,omitempty"`
FollowRedirects *bool `yaml:"follow_redirects"` // omitempty isn't set, since the default value for this flag is true.
HonorTimestamps *bool `yaml:"honor_timestamps,omitempty"`
FollowRedirects *bool `yaml:"follow_redirects,omitempty"`
Scheme string `yaml:"scheme,omitempty"`
Params map[string][]string `yaml:"params,omitempty"`
HTTPClientConfig promauth.HTTPClientConfig `yaml:",inline"`
@ -720,7 +720,10 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
scrapeTimeout = scrapeInterval
}
honorLabels := sc.HonorLabels
honorTimestamps := sc.HonorTimestamps
honorTimestamps := true
if sc.HonorTimestamps != nil {
honorTimestamps = *sc.HonorTimestamps
}
denyRedirects := false
if sc.FollowRedirects != nil {
denyRedirects = !*sc.FollowRedirects
@ -753,6 +756,12 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
if err != nil {
return nil, fmt.Errorf("cannot parse `metric_relabel_configs` for `job_name` %q: %w", jobName, err)
}
if (*streamParse || sc.StreamParse) && sc.SampleLimit > 0 {
return nil, fmt.Errorf("cannot use stream parsing mode when `sample_limit` is set for `job_name` %q", jobName)
}
if (*streamParse || sc.StreamParse) && sc.SeriesLimit > 0 {
return nil, fmt.Errorf("cannot use stream parsing mode when `series_limit` is set for `job_name` %q", jobName)
}
swc := &scrapeWorkConfig{
scrapeInterval: scrapeInterval,
scrapeTimeout: scrapeTimeout,
@ -926,11 +935,14 @@ func (stc *StaticConfig) appendScrapeWork(dst []*ScrapeWork, swc *scrapeWorkConf
return dst
}
func appendScrapeWorkKey(dst []byte, target string, extraLabels, metaLabels map[string]string) []byte {
dst = append(dst, target...)
dst = append(dst, ',')
dst = appendSortedKeyValuePairs(dst, extraLabels)
dst = appendSortedKeyValuePairs(dst, metaLabels)
func appendScrapeWorkKey(dst []byte, labels []prompbmarshal.Label) []byte {
for _, label := range labels {
// Do not use strconv.AppendQuote, since it is slow according to CPU profile.
dst = append(dst, label.Name...)
dst = append(dst, '=')
dst = append(dst, label.Value...)
dst = append(dst, ',')
}
return dst
}
@ -955,44 +967,13 @@ func needSkipScrapeWork(key string, membersCount, replicasCount, memberNum int)
return true
}
func appendSortedKeyValuePairs(dst []byte, m map[string]string) []byte {
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
// Do not use strconv.AppendQuote, since it is slow according to CPU profile.
dst = append(dst, k...)
dst = append(dst, '=')
dst = append(dst, m[k]...)
dst = append(dst, ',')
}
dst = append(dst, '\n')
return dst
}
var scrapeWorkKeyBufPool bytesutil.ByteBufferPool
func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabels map[string]string) (*ScrapeWork, error) {
// Verify whether the scrape work must be skipped because of `-promscrape.cluster.*` configs.
if *clusterMembersCount > 1 {
bb := scrapeWorkKeyBufPool.Get()
bb.B = appendScrapeWorkKey(bb.B[:0], target, extraLabels, metaLabels)
needSkip := needSkipScrapeWork(bytesutil.ToUnsafeString(bb.B), *clusterMembersCount, *clusterReplicationFactor, *clusterMemberNum)
scrapeWorkKeyBufPool.Put(bb)
if needSkip {
return nil, nil
}
}
labels := mergeLabels(swc, target, extraLabels, metaLabels)
var originalLabels []prompbmarshal.Label
if !*dropOriginalLabels {
originalLabels = append([]prompbmarshal.Label{}, labels...)
promrelabel.SortLabels(originalLabels)
// Reduce memory usage by interning all the strings in originalLabels.
internLabelStrings(originalLabels)
}
labels = swc.relabelConfigs.Apply(labels, 0, false)
labels = promrelabel.RemoveMetaLabels(labels[:0], labels)
@ -1001,6 +982,24 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825 for details.
labels = append([]prompbmarshal.Label{}, labels...)
// Verify whether the scrape work must be skipped because of `-promscrape.cluster.*` configs.
// Perform the verification on labels after the relabeling in order to guarantee that targets with the same set of labels
// go to the same vmagent shard.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1687#issuecomment-940629495
if *clusterMembersCount > 1 {
bb := scrapeWorkKeyBufPool.Get()
bb.B = appendScrapeWorkKey(bb.B[:0], labels)
needSkip := needSkipScrapeWork(bytesutil.ToUnsafeString(bb.B), *clusterMembersCount, *clusterReplicationFactor, *clusterMemberNum)
scrapeWorkKeyBufPool.Put(bb)
if needSkip {
return nil, nil
}
}
if !*dropOriginalLabels {
promrelabel.SortLabels(originalLabels)
// Reduce memory usage by interning all the strings in originalLabels.
internLabelStrings(originalLabels)
}
if len(labels) == 0 {
// Drop target without labels.
droppedTargetsMap.Register(originalLabels)
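A simplified sketch of the sharding decision described above (assumptions: keys are hashed with `xxhash`, which is already among the module's dependencies, and `-promscrape.cluster.replicationFactor` is ignored here; the real `needSkipScrapeWork` is more involved):

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

type label struct{ Name, Value string }

// scrapeWorkKey builds the shard key from the labels left after relabeling,
// so every vmagent instance with the same -promscrape.cluster.* settings
// assigns a given target to the same member.
func scrapeWorkKey(labels []label) string {
	b := make([]byte, 0, 64)
	for _, l := range labels {
		b = append(b, l.Name...)
		b = append(b, '=')
		b = append(b, l.Value...)
		b = append(b, ',')
	}
	return string(b)
}

func needSkip(key string, membersCount, memberNum uint64) bool {
	return xxhash.Sum64String(key)%membersCount != memberNum
}

func main() {
	labels := []label{{"__address__", "host1:80"}, {"job", "foo"}}
	// With 3 members, exactly one of memberNum 0..2 keeps this target.
	for n := uint64(0); n < 3; n++ {
		fmt.Printf("member %d skips the target: %v\n", n, needSkip(scrapeWorkKey(labels), 3, n))
	}
}
```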

View file

@ -128,9 +128,10 @@ scrape_configs:
sws := cfg.getStaticScrapeWork()
resetNonEssentialFields(sws)
swsExpected := []*ScrapeWork{{
ScrapeURL: "http://black:9115/probe?module=dns_udp_example&target=8.8.8.8",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
ScrapeURL: "http://black:9115/probe?module=dns_udp_example&target=8.8.8.8",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__address__",
@ -574,8 +575,7 @@ scrape_configs:
ScrapeURL: "http://host1:80/abc/de",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorLabels: false,
HonorTimestamps: false,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__address__",
@ -622,8 +622,7 @@ scrape_configs:
ScrapeURL: "http://host2:80/abc/de",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorLabels: false,
HonorTimestamps: false,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__address__",
@ -670,8 +669,7 @@ scrape_configs:
ScrapeURL: "http://localhost:9090/abc/de",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorLabels: false,
HonorTimestamps: false,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__address__",
@ -740,8 +738,7 @@ scrape_configs:
ScrapeURL: "http://foo.bar:1234/metrics",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorLabels: false,
HonorTimestamps: false,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__address__",
@ -791,8 +788,7 @@ scrape_configs:
ScrapeURL: "http://foo.bar:1234/metrics",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorLabels: false,
HonorTimestamps: false,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__address__",
@ -847,7 +843,7 @@ scrape_configs:
metrics_path: /foo/bar
scheme: https
honor_labels: true
honor_timestamps: true
honor_timestamps: false
follow_redirects: false
params:
p: ["x&y", "="]
@ -873,7 +869,7 @@ scrape_configs:
ScrapeInterval: 54 * time.Second,
ScrapeTimeout: 5 * time.Second,
HonorLabels: true,
HonorTimestamps: true,
HonorTimestamps: false,
DenyRedirects: true,
Labels: []prompbmarshal.Label{
{
@ -923,7 +919,7 @@ scrape_configs:
ScrapeInterval: 54 * time.Second,
ScrapeTimeout: 5 * time.Second,
HonorLabels: true,
HonorTimestamps: true,
HonorTimestamps: false,
DenyRedirects: true,
Labels: []prompbmarshal.Label{
{
@ -969,9 +965,10 @@ scrape_configs:
jobNameOriginal: "foo",
},
{
ScrapeURL: "http://1.2.3.4:80/metrics",
ScrapeInterval: 8 * time.Second,
ScrapeTimeout: 8 * time.Second,
ScrapeURL: "http://1.2.3.4:80/metrics",
ScrapeInterval: 8 * time.Second,
ScrapeTimeout: 8 * time.Second,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__address__",
@ -1010,9 +1007,10 @@ scrape_configs:
jobNameOriginal: "qwer",
},
{
ScrapeURL: "http://foobar:80/metrics",
ScrapeInterval: 8 * time.Second,
ScrapeTimeout: 8 * time.Second,
ScrapeURL: "http://foobar:80/metrics",
ScrapeInterval: 8 * time.Second,
ScrapeTimeout: 8 * time.Second,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__address__",
@ -1085,9 +1083,10 @@ scrape_configs:
- targets: ["foo.bar:1234", "drop-this-target"]
`, []*ScrapeWork{
{
ScrapeURL: "http://foo.bar:1234/metrics?x=keep_me",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
ScrapeURL: "http://foo.bar:1234/metrics?x=keep_me",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__address__",
@ -1164,9 +1163,10 @@ scrape_configs:
- targets: ["foo.bar:1234"]
`, []*ScrapeWork{
{
ScrapeURL: "mailto://foo.bar:1234/abc.de?a=b",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
ScrapeURL: "mailto://foo.bar:1234/abc.de?a=b",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__address__",
@ -1228,9 +1228,10 @@ scrape_configs:
- targets: ["foo.bar:1234", "xyz"]
`, []*ScrapeWork{
{
ScrapeURL: "http://foo.bar:1234/metrics",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
ScrapeURL: "http://foo.bar:1234/metrics",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__address__",
@ -1261,9 +1262,10 @@ scrape_configs:
- targets: ["foo.bar:1234"]
`, []*ScrapeWork{
{
ScrapeURL: "http://foo.bar:1234/metrics",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
ScrapeURL: "http://foo.bar:1234/metrics",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__address__",
@ -1310,9 +1312,10 @@ scrape_configs:
- targets: ["foo.bar:1234"]
`, []*ScrapeWork{
{
ScrapeURL: "http://foo.bar:1234/metrics",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
ScrapeURL: "http://foo.bar:1234/metrics",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__address__",
@ -1355,9 +1358,10 @@ scrape_configs:
- targets: ["foo.bar:1234"]
`, []*ScrapeWork{
{
ScrapeURL: "http://foo.bar:1234/metrics",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
ScrapeURL: "http://foo.bar:1234/metrics",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__address__",
@ -1414,9 +1418,10 @@ scrape_configs:
job: yyy
`, []*ScrapeWork{
{
ScrapeURL: "http://pp:80/metrics?a=c&a=xy",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
ScrapeURL: "http://pp:80/metrics?a=c&a=xy",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__address__",
@ -1491,9 +1496,10 @@ scrape_configs:
replacement: true
`, []*ScrapeWork{
{
ScrapeURL: "http://127.0.0.1:9116/snmp?module=if_mib&target=192.168.1.2",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
ScrapeURL: "http://127.0.0.1:9116/snmp?module=if_mib&target=192.168.1.2",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__address__",
@ -1562,9 +1568,10 @@ scrape_configs:
target_label: __metrics_path__
`, []*ScrapeWork{
{
ScrapeURL: "http://foo.bar:1234/metricspath",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
ScrapeURL: "http://foo.bar:1234/metricspath",
ScrapeInterval: defaultScrapeInterval,
ScrapeTimeout: defaultScrapeTimeout,
HonorTimestamps: true,
Labels: []prompbmarshal.Label{
{
Name: "__address__",

View file

@ -5,6 +5,7 @@ import (
"fmt"
"io"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
)
@ -103,6 +104,14 @@ func (eps *Endpoints) getTargetLabels(gw *groupWatcher) []map[string]string {
ms = appendEndpointLabelsForAddresses(ms, gw, podPortsSeen, eps, ess.NotReadyAddresses, epp, svc, "false")
}
}
// See https://kubernetes.io/docs/reference/labels-annotations-taints/#endpoints-kubernetes-io-over-capacity
// and https://github.com/kubernetes/kubernetes/pull/99975
switch eps.Metadata.Annotations.GetByName("endpoints.kubernetes.io/over-capacity") {
case "truncated":
logger.Warnf(`the number of targets for "role: endpoints" %q exceeds 1000 and has been truncated; please use "role: endpointslice" instead`, eps.Metadata.key())
case "warning":
logger.Warnf(`the number of targets for "role: endpoints" %q exceeds 1000 and will be truncated in the next k8s releases; please use "role: endpointslice" instead`, eps.Metadata.key())
}
// Append labels for skipped ports on seen pods.
portSeen := func(port int, ports []int) bool {

View file

@ -33,6 +33,16 @@ func JoinHostPort(host string, port int) string {
// SortedLabels represents sorted labels.
type SortedLabels []prompbmarshal.Label
// GetByName returns the value of the label with the given name in sls. It returns an empty string if the label is missing.
func (sls *SortedLabels) GetByName(name string) string {
for _, lb := range *sls {
if lb.Name == name {
return lb.Value
}
}
return ""
}
// UnmarshalJSON unmarshals JSON from data.
func (sls *SortedLabels) UnmarshalJSON(data []byte) error {
var m map[string]string

View file

@ -4,6 +4,7 @@ import (
"bytes"
"flag"
"fmt"
"io"
"sync"
"sync/atomic"
"time"
@ -70,8 +71,22 @@ var (
// PendingScrapeConfigs - zero value means that
// all scrapeConfigs are initialized and ready for work.
PendingScrapeConfigs int32
// configData contains -promscrape.config data
configData atomic.Value
)
// WriteConfigData writes -promscrape.config contents to w
func WriteConfigData(w io.Writer) {
v := configData.Load()
if v == nil {
// Nothing to write to w
return
}
b := v.(*[]byte)
_, _ = w.Write(*b)
}
func runScraper(configFile string, pushData func(wr *prompbmarshal.WriteRequest), globalStopCh <-chan struct{}) {
if configFile == "" {
// Nothing to scrape.
@ -89,6 +104,7 @@ func runScraper(configFile string, pushData func(wr *prompbmarshal.WriteRequest)
logger.Fatalf("cannot read %q: %s", configFile, err)
}
data := cfg.marshal()
configData.Store(&data)
cfg.mustStart()
scs := newScrapeConfigs(pushData)
@ -132,6 +148,7 @@ func runScraper(configFile string, pushData func(wr *prompbmarshal.WriteRequest)
cfgNew.mustStart()
cfg = cfgNew
data = dataNew
configData.Store(&data)
case <-tickerCh:
cfgNew, err := loadConfig(configFile)
if err != nil {
@ -147,6 +164,7 @@ func runScraper(configFile string, pushData func(wr *prompbmarshal.WriteRequest)
cfgNew.mustStart()
cfg = cfgNew
data = dataNew
configData.Store(&data)
case <-globalStopCh:
cfg.mustStop()
logger.Infof("stopping Prometheus scrapers")

View file

@ -13,6 +13,8 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bloomfilter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/leveledbytebufferpool"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
@ -28,8 +30,9 @@ import (
var (
suppressScrapeErrors = flag.Bool("promscrape.suppressScrapeErrors", false, "Whether to suppress scrape errors logging. "+
"The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed")
noStaleMarkers = flag.Bool("promscrape.noStaleMarkers", false, "Whether to disable sending Prometheus stale markers for metrics when scrape target disappears. This option may reduce memory usage if stale markers aren't needed for your setup. See also https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode")
seriesLimitPerTarget = flag.Int("promscrape.seriesLimitPerTarget", 0, "Optional limit on the number of unique time series a single scrape target can expose. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter for more info")
noStaleMarkers = flag.Bool("promscrape.noStaleMarkers", false, "Whether to disable sending Prometheus stale markers for metrics when scrape target disappears. This option may reduce memory usage if stale markers aren't needed for your setup. This option also disables populating the scrape_series_added metric. See https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series")
seriesLimitPerTarget = flag.Int("promscrape.seriesLimitPerTarget", 0, "Optional limit on the number of unique time series a single scrape target can expose. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter for more info")
minResponseSizeForStreamParse = flagutil.NewBytes("promscrape.minResponseSizeForStreamParse", 1e6, "The minimum target response size for automatic switching to stream parsing mode, which can reduce memory usage. See https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode")
)
// ScrapeWork represents a unit of work for scraping Prometheus metrics.
@ -115,6 +118,12 @@ type ScrapeWork struct {
jobNameOriginal string
}
func (sw *ScrapeWork) canSwitchToStreamParseMode() bool {
// Deny switching to stream parse mode if `sample_limit` or `series_limit` options are set,
// since these limits cannot be applied in stream parsing mode.
return sw.SampleLimit <= 0 && sw.SeriesLimit <= 0
}
// key returns unique identifier for the given sw.
//
// it can be used for comparing for equality for two ScrapeWork objects.
@ -190,6 +199,9 @@ type scrapeWork struct {
// Optional limiter on the number of unique series per scrape target.
seriesLimiter *bloomfilter.Limiter
// Optional counter on the number of dropped samples if the limit on the number of unique series is set.
seriesLimiterRowsDroppedTotal *metrics.Counter
// prevBodyLen contains the previous response body length for the given scrape work.
// It is used as a hint in order to reduce memory usage for body buffers.
prevBodyLen int
@ -199,7 +211,48 @@ type scrapeWork struct {
prevLabelsLen int
// lastScrape holds the last response from scrape target.
// It is used for staleness tracking and for populating scrape_series_added metric.
// The lastScrape isn't populated if -promscrape.noStaleMarkers is set. This reduces memory usage.
lastScrape []byte
// lastScrapeCompressed is used for storing the compressed lastScrape between scrapes
// in stream parsing mode in order to reduce memory usage when the lastScrape size
// equals or exceeds -promscrape.minResponseSizeForStreamParse
lastScrapeCompressed []byte
}
func (sw *scrapeWork) loadLastScrape() string {
if len(sw.lastScrapeCompressed) > 0 {
b, err := encoding.DecompressZSTD(sw.lastScrape[:0], sw.lastScrapeCompressed)
if err != nil {
logger.Panicf("BUG: cannot unpack compressed previous response: %s", err)
}
sw.lastScrape = b
}
return bytesutil.ToUnsafeString(sw.lastScrape)
}
func (sw *scrapeWork) storeLastScrape(lastScrape []byte) {
mustCompress := minResponseSizeForStreamParse.N > 0 && len(lastScrape) >= minResponseSizeForStreamParse.N
if mustCompress {
sw.lastScrapeCompressed = encoding.CompressZSTDLevel(sw.lastScrapeCompressed[:0], lastScrape, 1)
sw.lastScrape = nil
} else {
sw.lastScrape = append(sw.lastScrape[:0], lastScrape...)
sw.lastScrapeCompressed = nil
}
}
func (sw *scrapeWork) finalizeLastScrape() {
if len(sw.lastScrapeCompressed) > 0 {
// The compressed lastScrape is available in sw.lastScrapeCompressed.
// Release the memory occupied by sw.lastScrape, so it won't be occupied between scrapes.
sw.lastScrape = nil
}
if len(sw.lastScrape) > 0 {
// Release the memory occupied by sw.lastScrapeCompressed, so it won't be occupied between scrapes.
sw.lastScrapeCompressed = nil
}
}
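A round-trip sketch of the `lib/encoding` helpers as they are used above (level 1 trades compression ratio for speed, which fits per-scrape compression):

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
)

func main() {
	body := []byte(`http_requests_total{path="/foo"} 123` + "\n")
	// Compress the previous scrape response at the fastest zstd level.
	compressed := encoding.CompressZSTDLevel(nil, body, 1)
	// Decompress it back before diffing against the next response.
	plain, err := encoding.DecompressZSTD(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes -> %d compressed -> %d restored\n", len(body), len(compressed), len(plain))
}
```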
func (sw *scrapeWork) run(stopCh <-chan struct{}) {
@ -251,8 +304,16 @@ func (sw *scrapeWork) run(stopCh <-chan struct{}) {
select {
case <-stopCh:
t := time.Now().UnixNano() / 1e6
sw.sendStaleSeries("", t, true)
lastScrape := sw.loadLastScrape()
sw.sendStaleSeries(lastScrape, "", t, true)
if sw.seriesLimiter != nil {
job := sw.Config.Job()
metrics.UnregisterMetric(fmt.Sprintf(`promscrape_series_limit_rows_dropped_total{scrape_job_original=%q,scrape_job=%q,scrape_target=%q}`,
sw.Config.jobNameOriginal, job, sw.Config.ScrapeURL))
metrics.UnregisterMetric(fmt.Sprintf(`promscrape_series_limit_max_series{scrape_job_original=%q,scrape_job=%q,scrape_target=%q}`,
sw.Config.jobNameOriginal, job, sw.Config.ScrapeURL))
metrics.UnregisterMetric(fmt.Sprintf(`promscrape_series_limit_current_series{scrape_job_original=%q,scrape_job=%q,scrape_target=%q}`,
sw.Config.jobNameOriginal, job, sw.Config.ScrapeURL))
sw.seriesLimiter.MustStop()
}
return
@ -290,10 +351,17 @@ var (
pushDataDuration = metrics.NewHistogram("vm_promscrape_push_data_duration_seconds")
)
func (sw *scrapeWork) mustSwitchToStreamParseMode(responseSize int) bool {
if minResponseSizeForStreamParse.N <= 0 {
return false
}
return sw.Config.canSwitchToStreamParseMode() && responseSize >= minResponseSizeForStreamParse.N
}
func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error {
if *streamParse || sw.Config.StreamParse {
if *streamParse || sw.Config.StreamParse || sw.mustSwitchToStreamParseMode(sw.prevBodyLen) {
// Read data from scrape targets in streaming manner.
// This case is optimized for targets exposing millions and more of metrics per target.
// This case is optimized for targets exposing more than ten thousand metrics per target.
return sw.scrapeStream(scrapeTimestamp, realTimestamp)
}
@ -309,9 +377,9 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
scrapeResponseSize.Update(float64(len(body.B)))
up := 1
wc := writeRequestCtxPool.Get(sw.prevLabelsLen)
lastScrape := sw.loadLastScrape()
bodyString := bytesutil.ToUnsafeString(body.B)
lastScrape := bytesutil.ToUnsafeString(sw.lastScrape)
areIdenticalSeries := parser.AreIdenticalSeriesFast(lastScrape, bodyString)
areIdenticalSeries := *noStaleMarkers || parser.AreIdenticalSeriesFast(lastScrape, bodyString)
if err != nil {
up = 0
scrapesFailed.Inc()
@ -340,7 +408,7 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
// The returned value for seriesAdded may be bigger than the real number of added series
// if some series were removed during relabeling.
// This is a trade-off between performance and accuracy.
seriesAdded = sw.getSeriesAdded(bodyString)
seriesAdded = sw.getSeriesAdded(lastScrape, bodyString)
}
if sw.seriesLimitExceeded || !areIdenticalSeries {
if sw.applySeriesLimit(wc) {
@ -355,15 +423,25 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
sw.addAutoTimeseries(wc, "scrape_timeout_seconds", sw.Config.ScrapeTimeout.Seconds(), scrapeTimestamp)
sw.pushData(&wc.writeRequest)
sw.prevLabelsLen = len(wc.labels)
	sw.prevBodyLen = len(bodyString)
	wc.reset()
	mustSwitchToStreamParse := sw.mustSwitchToStreamParseMode(len(bodyString))
	if !mustSwitchToStreamParse {
		// Return wc to the pool if the parsed response size was smaller than -promscrape.minResponseSizeForStreamParse.
		// This should reduce memory usage when scraping targets with big responses.
		writeRequestCtxPool.Put(wc)
	}
	// body must be released only after wc is released, since wc refers to body.
	if !areIdenticalSeries {
		sw.sendStaleSeries(lastScrape, bodyString, scrapeTimestamp, false)
		sw.storeLastScrape(body.B)
	}
	sw.finalizeLastScrape()
	if !mustSwitchToStreamParse {
		// Return body to the pool only if its size is smaller than -promscrape.minResponseSizeForStreamParse.
		// This should reduce memory usage when scraping targets which return big responses.
		leveledbytebufferpool.Put(body)
	}
tsmGlobal.Update(sw.Config, sw.ScrapeGroup, up == 1, realTimestamp, int64(duration*1000), samplesScraped, err)
return err
}
@ -374,18 +452,38 @@ func (sw *scrapeWork) pushData(wr *prompbmarshal.WriteRequest) {
pushDataDuration.UpdateDuration(startTime)
}
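// streamBodyReader wraps the scrape response stream: it counts the bytes read
// and, when stale markers are enabled (captureBody is true), captures a copy
// of the body so the next scrape can be diffed against it.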
type streamBodyReader struct {
sr *streamReader
body []byte
bodyLen int
captureBody bool
}
func (sbr *streamBodyReader) Read(b []byte) (int, error) {
n, err := sbr.sr.Read(b)
sbr.bodyLen += n
if sbr.captureBody {
sbr.body = append(sbr.body, b[:n]...)
}
return n, err
}
func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
samplesScraped := 0
samplesPostRelabeling := 0
responseSize := int64(0)
wc := writeRequestCtxPool.Get(sw.prevLabelsLen)
// Do not pool sbr and do not pre-allocate sbr.body in order to reduce memory usage when scraping big responses.
sbr := &streamBodyReader{
captureBody: !*noStaleMarkers,
}
sr, err := sw.GetStreamReader()
if err != nil {
err = fmt.Errorf("cannot read data: %s", err)
} else {
var mu sync.Mutex
err = parser.ParseStream(sr, scrapeTimestamp, false, func(rows []parser.Row) error {
sbr.sr = sr
err = parser.ParseStream(sbr, scrapeTimestamp, false, func(rows []parser.Row) error {
mu.Lock()
defer mu.Unlock()
samplesScraped += len(rows)
@ -406,15 +504,17 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
wc.resetNoRows()
return nil
}, sw.logError)
responseSize = sr.bytesRead
sr.MustClose()
}
lastScrape := sw.loadLastScrape()
bodyString := bytesutil.ToUnsafeString(sbr.body)
areIdenticalSeries := *noStaleMarkers || parser.AreIdenticalSeriesFast(lastScrape, bodyString)
scrapedSamples.Update(float64(samplesScraped))
endTimestamp := time.Now().UnixNano() / 1e6
duration := float64(endTimestamp-realTimestamp) / 1e3
scrapeDuration.Update(duration)
scrapeResponseSize.Update(float64(responseSize))
scrapeResponseSize.Update(float64(sbr.bodyLen))
up := 1
if err != nil {
if samplesScraped == 0 {
@ -422,18 +522,29 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
}
scrapesFailed.Inc()
}
seriesAdded := 0
if !areIdenticalSeries {
// The returned value for seriesAdded may be bigger than the real number of added series
// if some series were removed during relabeling.
// This is a trade-off between performance and accuracy.
seriesAdded = sw.getSeriesAdded(lastScrape, bodyString)
}
sw.addAutoTimeseries(wc, "up", float64(up), scrapeTimestamp)
sw.addAutoTimeseries(wc, "scrape_duration_seconds", duration, scrapeTimestamp)
sw.addAutoTimeseries(wc, "scrape_samples_scraped", float64(samplesScraped), scrapeTimestamp)
sw.addAutoTimeseries(wc, "scrape_samples_post_metric_relabeling", float64(samplesPostRelabeling), scrapeTimestamp)
// scrape_series_added isn't calculated in streaming mode,
// since it may need unlimited amounts of memory when scraping targets with millions of exposed metrics.
sw.addAutoTimeseries(wc, "scrape_series_added", 0, scrapeTimestamp)
sw.addAutoTimeseries(wc, "scrape_series_added", float64(seriesAdded), scrapeTimestamp)
sw.addAutoTimeseries(wc, "scrape_timeout_seconds", sw.Config.ScrapeTimeout.Seconds(), scrapeTimestamp)
sw.pushData(&wc.writeRequest)
sw.prevLabelsLen = len(wc.labels)
sw.prevBodyLen = sbr.bodyLen
wc.reset()
writeRequestCtxPool.Put(wc)
if !areIdenticalSeries {
sw.sendStaleSeries(lastScrape, bodyString, scrapeTimestamp, false)
sw.storeLastScrape(sbr.body)
}
sw.finalizeLastScrape()
tsmGlobal.Update(sw.Config, sw.ScrapeGroup, up == 1, realTimestamp, int64(duration*1000), samplesScraped, err)
// Do not track active series in streaming mode, since this may need too big amounts of memory
// when the target exports too big number of metrics.
@ -506,11 +617,10 @@ func (wc *writeRequestCtx) resetNoRows() {
var writeRequestCtxPool leveledWriteRequestCtxPool
func (sw *scrapeWork) getSeriesAdded(currScrape string) int {
func (sw *scrapeWork) getSeriesAdded(lastScrape, currScrape string) int {
if currScrape == "" {
return 0
}
lastScrape := bytesutil.ToUnsafeString(sw.lastScrape)
bodyString := parser.GetRowsDiff(currScrape, lastScrape)
return strings.Count(bodyString, "\n")
}
@ -521,22 +631,31 @@ func (sw *scrapeWork) applySeriesLimit(wc *writeRequestCtx) bool {
seriesLimit = sw.Config.SeriesLimit
}
if sw.seriesLimiter == nil && seriesLimit > 0 {
job := sw.Config.Job()
sw.seriesLimiter = bloomfilter.NewLimiter(seriesLimit, 24*time.Hour)
sw.seriesLimiterRowsDroppedTotal = metrics.GetOrCreateCounter(fmt.Sprintf(`promscrape_series_limit_rows_dropped_total{scrape_job_original=%q,scrape_job=%q,scrape_target=%q}`,
sw.Config.jobNameOriginal, job, sw.Config.ScrapeURL))
_ = metrics.GetOrCreateGauge(fmt.Sprintf(`promscrape_series_limit_max_series{scrape_job_original=%q,scrape_job=%q,scrape_target=%q}`,
sw.Config.jobNameOriginal, job, sw.Config.ScrapeURL), func() float64 {
return float64(sw.seriesLimiter.MaxItems())
})
_ = metrics.GetOrCreateGauge(fmt.Sprintf(`promscrape_series_limit_current_series{scrape_job_original=%q,scrape_job=%q,scrape_target=%q}`,
sw.Config.jobNameOriginal, job, sw.Config.ScrapeURL), func() float64 {
return float64(sw.seriesLimiter.CurrentItems())
})
}
hsl := sw.seriesLimiter
if hsl == nil {
return false
}
dstSeries := wc.writeRequest.Timeseries[:0]
job := sw.Config.Job()
limitExceeded := false
for _, ts := range wc.writeRequest.Timeseries {
h := sw.getLabelsHash(ts.Labels)
if !hsl.Add(h) {
// The limit on the number of hourly unique series per scrape target has been exceeded.
// Drop the metric.
metrics.GetOrCreateCounter(fmt.Sprintf(`promscrape_series_limit_rows_dropped_total{scrape_job_original=%q,scrape_job=%q,scrape_target=%q}`,
sw.Config.jobNameOriginal, job, sw.Config.ScrapeURL)).Inc()
sw.seriesLimiterRowsDroppedTotal.Inc()
limitExceeded = true
continue
}
@ -546,11 +665,10 @@ func (sw *scrapeWork) applySeriesLimit(wc *writeRequestCtx) bool {
return limitExceeded
}
func (sw *scrapeWork) sendStaleSeries(currScrape string, timestamp int64, addAutoSeries bool) {
func (sw *scrapeWork) sendStaleSeries(lastScrape, currScrape string, timestamp int64, addAutoSeries bool) {
if *noStaleMarkers {
return
}
lastScrape := bytesutil.ToUnsafeString(sw.lastScrape)
bodyString := lastScrape
if currScrape != "" {
bodyString = parser.GetRowsDiff(lastScrape, currScrape)

Some files were not shown because too many files have changed in this diff.