Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git
Synced 2025-02-19 15:30:17 +00:00

Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Commit eba0e6dbc0: 753 changed files with 40630 additions and 12116 deletions
.github/workflows/check-licenses.yml (vendored, 2 changed lines)

@@ -17,7 +17,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@main
         with:
-          go-version: 1.20.3
+          go-version: 1.20.4
         id: go
       - name: Code checkout
         uses: actions/checkout@master
.github/workflows/codeql-analysis.yml (vendored, 2 changed lines)

@@ -57,7 +57,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v4
       with:
-        go-version: 1.20.3
+        go-version: 1.20.4
         check-latest: true
         cache: true
       if: ${{ matrix.language == 'go' }}
.github/workflows/main.yml (vendored, 6 changed lines)

@@ -32,7 +32,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v4
         with:
-          go-version: 1.20.3
+          go-version: 1.20.4
           check-latest: true
           cache: true

@@ -56,7 +56,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v4
         with:
-          go-version: 1.20.3
+          go-version: 1.20.4
           check-latest: true
           cache: true

@@ -81,7 +81,7 @@ jobs:
         id: go
         uses: actions/setup-go@v4
         with:
-          go-version: 1.20.3
+          go-version: 1.20.4
          check-latest: true
          cache: true
README.md (22 changed lines)

@@ -303,14 +303,18 @@ Prometheus doesn't drop data during VictoriaMetrics restart. See [this article](
 ## vmui
 
 VictoriaMetrics provides UI for query troubleshooting and exploration. The UI is available at `http://victoriametrics:8428/vmui`.
-The UI allows exploring query results via graphs and tables.
-It also provides the following features:
+The UI allows exploring query results via graphs and tables. It also provides the following features:
 
 - [metrics explorer](#metrics-explorer)
 - [cardinality explorer](#cardinality-explorer)
 - [query tracer](#query-tracing)
 - [top queries explorer](#top-queries)
 
+VMUI automatically switches from graph view to heatmap view when the query returns [histogram](https://docs.victoriametrics.com/keyConcepts.html#histogram) buckets
+(both [Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram)
+and [VictoriaMetrics histograms](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350) are supported).
+Try, for example, [this query](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/#/?g0.expr=sum%28rate%28vm_promscrape_scrape_duration_seconds_bucket%29%29+by+%28vmrange%29&g0.range_input=24h&g0.end_input=2023-04-10T17%3A46%3A12&g0.relative_time=last_24_hours&g0.step_input=31m).
+
 Graphs in `vmui` support scrolling and zooming:
 
 * Select the needed time range on the graph in order to zoom in into the selected time range. Hold `ctrl` (or `cmd` on MacOS) and scroll down in order to zoom out.
@@ -321,9 +325,12 @@ Query history can be navigated by holding `Ctrl` (or `Cmd` on MacOS) and pressin
 
 Multi-line queries can be entered by pressing `Shift-Enter` in query input field.
 
-When querying the [backfilled data](https://docs.victoriametrics.com/#backfilling) or during [query troubleshooting](https://docs.victoriametrics.com/Troubleshooting.html#unexpected-query-results), it may be useful disabling response cache by clicking `Disable cache` checkbox.
+When querying the [backfilled data](https://docs.victoriametrics.com/#backfilling)
+or during [query troubleshooting](https://docs.victoriametrics.com/Troubleshooting.html#unexpected-query-results),
+it may be useful disabling response cache by clicking `Disable cache` checkbox.
 
-VMUI automatically adjusts the interval between datapoints on the graph depending on the horizontal resolution and on the selected time range. The step value can be customized by changing `Step value` input.
+VMUI automatically adjusts the interval between datapoints on the graph depending on the horizontal resolution and on the selected time range.
+The step value can be customized by changing `Step value` input.
 
 VMUI allows investigating correlations between multiple queries on the same graph. Just click `Add Query` button,
 enter an additional query in the newly appeared input field and press `Enter`.
@@ -565,7 +572,8 @@ The `/api/v1/export` endpoint should return the following response:
 ```
 
 Note that InfluxDB line protocol expects [timestamps in *nanoseconds* by default](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/#timestamp),
-while VictoriaMetrics stores them with *milliseconds* precision.
+while VictoriaMetrics stores them with *milliseconds* precision. It is allowed to ingest timestamps with seconds,
+microseconds or nanoseconds precision - VictoriaMetrics will automatically convert them to milliseconds.
 
 Extra labels may be added to all the written time series by passing `extra_label=name=value` query args.
 For example, `/write?extra_label=foo=bar` would add `{foo="bar"}` label to all the ingested metrics.
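To see the precision conversion in action, one can write a seconds-precision sample over the InfluxDB line protocol and export it back. Below is a minimal hedged sketch in Go: the metric `foo`, tag and field names are made up for illustration, and the default `{measurement}_{field}` metric naming is assumed.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	// 1687161600 is a *seconds* timestamp; VictoriaMetrics detects the
	// precision and stores it with milliseconds precision (1687161600000).
	line := "foo,tag1=value1 fieldname=123 1687161600"
	resp, err := http.Post("http://victoriametrics:8428/write", "text/plain", strings.NewReader(line))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()

	// Export the series back; timestamps in the response are milliseconds.
	match := url.QueryEscape(`{__name__="foo_fieldname"}`)
	resp, err = http.Get("http://victoriametrics:8428/api/v1/export?match[]=" + match)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```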
@@ -2169,7 +2177,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
 ```
   -bigMergeConcurrency int
-    	The maximum number of CPU cores to use for big merges. Default value is used if set to 0
+    	Deprecated: this flag does nothing. Please use -smallMergeConcurrency for controlling the concurrency of background merges. See https://docs.victoriametrics.com/#storage
   -cacheExpireDuration duration
    	Items are removed from in-memory caches after they aren't accessed for this duration. Lower values may reduce memory usage at the cost of higher CPU usage. See also -prevCacheRemovalPercent (default 30m0s)
   -configAuthKey string
@@ -2519,7 +2527,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
   -selfScrapeJob string
    	Value for 'job' label, which is added to self-scraped metrics (default "victoria-metrics")
   -smallMergeConcurrency int
-    	The maximum number of CPU cores to use for small merges. Default value is used if set to 0
+    	The maximum number of workers for background merges. See https://docs.victoriametrics.com/#storage . It isn't recommended tuning this flag in general case, since this may lead to uncontrolled increase in the number of parts and increased CPU usage during queries
   -snapshotAuthKey string
    	authKey, which must be passed in query string to /snapshot* pages
   -snapshotCreateTimeout duration
@@ -1351,6 +1351,8 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
    	Wait time used by Consul service discovery. Default value is used if not set
   -promscrape.consulSDCheckInterval duration
    	Interval for checking for changes in Consul. This works only if consul_sd_configs is configured in '-promscrape.config' file. See https://docs.victoriametrics.com/sd_configs.html#consul_sd_configs for details (default 30s)
+  -promscrape.consulagentSDCheckInterval duration
+    	Interval for checking for changes in Consul Agent. This works only if consulagent_sd_configs is configured in '-promscrape.config' file. See https://docs.victoriametrics.com/sd_configs.html#consulagent_sd_configs for details (default 30s)
   -promscrape.digitaloceanSDCheckInterval duration
    	Interval for checking for changes in digital ocean. This works only if digitalocean_sd_configs is configured in '-promscrape.config' file. See https://docs.victoriametrics.com/sd_configs.html#digitalocean_sd_configs for details (default 1m0s)
   -promscrape.disableCompression
@@ -1483,7 +1485,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
   -remoteWrite.maxDailySeries int
    	The maximum number of unique series vmagent can send to remote storage systems during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter
   -remoteWrite.maxDiskUsagePerURL array
-    	The maximum file-based buffer size in bytes at -remoteWrite.tmpDataPath for each -remoteWrite.url. When buffer size reaches the configured maximum, then old data is dropped when adding new data to the buffer. Buffered data is stored in ~500MB chunks, so the minimum practical value for this flag is 500MB. Disk usage is unlimited if the value is set to 0
+    	The maximum file-based buffer size in bytes at -remoteWrite.tmpDataPath for each -remoteWrite.url. When buffer size reaches the configured maximum, then old data is dropped when adding new data to the buffer. Buffered data is stored in ~500MB chunks. It is recommended to set the value for this flag to a multiple of the block size 500MB. Disk usage is unlimited if the value is set to 0
    	Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB.
    	Supports array of values separated by comma or specified via multiple flags.
   -remoteWrite.maxHourlySeries int
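The updated wording matters operationally: the buffer is written and dropped in whole ~500MB chunk files, so a limit that isn't a multiple of the chunk size leaves an unusable remainder. A hedged back-of-the-envelope illustration (the exact chunk-size constant lives in the persistent queue code; 500MB is taken from the flag text above):

```go
const chunkFileSize = 500 * 1024 * 1024 // ~500MB, per the flag description

// usableBuffer shows how much of a configured limit whole chunk files can
// occupy. This illustrates the sizing advice; it is not library code.
func usableBuffer(maxDiskUsage int64) int64 {
	return (maxDiskUsage / chunkFileSize) * chunkFileSize
}

// usableBuffer(1258291200) == 1048576000: a 1.2GiB limit holds only two
// whole chunks (~1GiB), so multiples of 500MB avoid the wasted remainder.
```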
@@ -4,7 +4,6 @@ import (
 	"flag"
 	"fmt"
 	"net/url"
-	"os"
 	"path/filepath"
 	"strconv"
 	"sync"
@@ -48,7 +47,7 @@ var (
 		"It is hidden by default, since it can contain sensitive info such as auth key")
 	maxPendingBytesPerURL = flagutil.NewArrayBytes("remoteWrite.maxDiskUsagePerURL", "The maximum file-based buffer size in bytes at -remoteWrite.tmpDataPath "+
 		"for each -remoteWrite.url. When buffer size reaches the configured maximum, then old data is dropped when adding new data to the buffer. "+
-		"Buffered data is stored in ~500MB chunks, so the minimum practical value for this flag is 500MB. "+
+		"Buffered data is stored in ~500MB chunks. It is recommended to set the value for this flag to a multiple of the block size 500MB. "+
 		"Disk usage is unlimited if the value is set to 0")
 	significantFigures = flagutil.NewArrayInt("remoteWrite.significantFigures", "The number of significant figures to leave in metric values before writing them "+
 		"to remote storage. See https://en.wikipedia.org/wiki/Significant_figures . Zero value saves all the significant figures. "+
@@ -265,10 +264,7 @@ func newRemoteWriteCtxs(at *auth.Token, urls []string) []*remoteWriteCtx {
 	}
 
 	queuesDir := filepath.Join(*tmpDataPath, persistentQueueDirname)
-	files, err := os.ReadDir(queuesDir)
-	if err != nil {
-		logger.Fatalf("cannot read queues dir %q: %s", queuesDir, err)
-	}
+	files := fs.MustReadDir(queuesDir)
 	removed := 0
 	for _, f := range files {
 		dirname := f.Name()
@@ -527,6 +523,11 @@ func newRemoteWriteCtx(argIdx int, at *auth.Token, remoteWriteURL *url.URL, maxI
 	h := xxhash.Sum64([]byte(pqURL.String()))
 	queuePath := filepath.Join(*tmpDataPath, persistentQueueDirname, fmt.Sprintf("%d_%016X", argIdx+1, h))
 	maxPendingBytes := maxPendingBytesPerURL.GetOptionalArgOrDefault(argIdx, 0)
+	if maxPendingBytes != 0 && maxPendingBytes < persistentqueue.DefaultChunkFileSize {
+		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4195
+		logger.Warnf("rounding the -remoteWrite.maxDiskUsagePerURL=%d to the minimum supported value: %d", maxPendingBytes, persistentqueue.DefaultChunkFileSize)
+		maxPendingBytes = persistentqueue.DefaultChunkFileSize
+	}
 	fq := persistentqueue.MustOpenFastQueue(queuePath, sanitizedURL, maxInmemoryBlocks, maxPendingBytes)
 	_ = metrics.GetOrCreateGauge(fmt.Sprintf(`vmagent_remotewrite_pending_data_bytes{path=%q, url=%q}`, queuePath, sanitizedURL), func() float64 {
 		return float64(fq.GetPendingBytes())
@@ -29,7 +29,8 @@ Use this feature for the following cases:
 * Recording and Alerting rules backfilling (aka `replay`). See [these docs](#rules-backfilling);
 * Lightweight and without extra dependencies.
 * Supports [reusable templates](#reusable-templates) for annotations;
-* Load of recording and alerting rules from local filesystem, GCS and S3.
+* Load of recording and alerting rules from local filesystem, URL, GCS and S3;
+* Detect alerting rules which [don't match any series](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4039).
 
 ## Limitations
 
@@ -144,6 +145,15 @@ params:
 headers:
   [ <string>, ...]
 
+# Optional list of HTTP headers in form `header-name: value`
+# applied for all alert notifications sent to notifiers
+# generated by rules of this group.
+# For example:
+#  notifier_headers:
+#    - "TenantID: foo"
+notifier_headers:
+  [ <string>, ...]
+
 # Optional list of labels added to every rule within a group.
 # It has priority over the external labels.
 # Labels are commonly used for adding environment
@@ -803,6 +813,22 @@ and vmalert will start printing additional log messages:
 2022-09-15T13:36:56.153Z  DEBUG rule "TestGroup":"Conns" (2601299393013563564) at 2022-09-15T15:36:56+02:00: alert 10705778000901301787 {alertgroup="TestGroup",alertname="Conns",cluster="east-1",instance="localhost:8429",replica="a"} PENDING => FIRING: 1m0s since becoming active at 2022-09-15 15:35:56.126006 +0200 CEST m=+39.384575417
 ```
 
+### Never-firing alerts
+
+vmalert can detect if alert's expression doesn't match any time series in runtime. This problem usually happens
+when alerting expression selects time series which aren't present in the datasource (i.e. wrong `job` label)
+or there is a typo in the series selector (i.e. `env=rpod`). Such alerting rules will be marked with special icon in
+vmalert's UI and exposed via `vmalert_alerting_rules_last_evaluation_series_fetched` metric. The metric's value will
+show how many time series were matched before the filtering by rule's expression. If metric's value is `-1`, then
+this feature is not supported by the datasource (old versions of VictoriaMetrics). The following expression can be
+used to detect rules matching no series:
+```
+max(vmalert_alerting_rules_last_evaluation_series_fetched) by(group, alertname) == 0
+```
+
+See more details [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4039).
+This feature is available only if vmalert is using VictoriaMetrics v1.90 or higher as a datasource.
+
 ## Profiling
 
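The same check can be automated against any Prometheus-compatible query endpoint. A hedged sketch in Go (the datasource address is a placeholder; the expression is the one from the docs above):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
)

func main() {
	expr := `max(vmalert_alerting_rules_last_evaluation_series_fetched) by(group, alertname) == 0`
	resp, err := http.Get("http://victoriametrics:8428/api/v1/query?query=" + url.QueryEscape(expr))
	if err != nil {
		log.Fatalf("query failed: %s", err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	// Each entry in data.result identifies a {group, alertname} pair
	// whose expression currently matches no series.
	fmt.Println(string(body))
}
```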
@@ -1153,8 +1179,10 @@ The shortlist of configuration flags is the following:
 	Path to the files with alerting and/or recording rules.
 	Supports hierarchical patterns and regexpes.
 	Examples:
-	 -rule="/path/to/file". Path to a single file with alerting rules
-	 -rule="dir/*.yaml" -rule="/*.yaml" -rule="gcs://vmalert-rules/tenant_%{TENANT_ID}/prod".
+	 -rule="/path/to/file". Path to a single file with alerting rules.
+	 -rule="http://<some-server-addr>/path/to/rules". HTTP URL to a page with alerting rules.
+	 -rule="dir/*.yaml" -rule="/*.yaml" -rule="gcs://vmalert-rules/tenant_%{TENANT_ID}/prod".
 	 -rule="dir/**/*.yaml". Includes all the .yaml files in "dir" subfolders recursively.
 	Rule files may contain %{ENV_VAR} placeholders, which are substituted by the corresponding env vars.
 
 	Enterprise version of vmalert supports S3 and GCS paths to rules.
@@ -1177,6 +1205,7 @@ The shortlist of configuration flags is the following:
 	 -rule.templates="/path/to/file". Path to a single file with go templates
 	 -rule.templates="dir/*.tpl" -rule.templates="/*.tpl". Relative path to all .tpl files in "dir" folder,
 	 absolute path to all .tpl files in root.
+	 -rule.templates="dir/**/*.tpl". Includes all the .tpl files in "dir" subfolders recursively.
 	 Supports an array of values separated by comma or specified via multiple flags.
   -rule.updateEntriesLimit int
    	Defines the max number of rule's state updates stored in-memory. Rule's updates are available on rule's Details page and are used for debugging purposes. The number of stored updates can be overriden per rule via update_entries_limit param. (default 20)
@@ -47,10 +47,11 @@ type AlertingRule struct {
 }
 
 type alertingRuleMetrics struct {
-	errors  *utils.Gauge
-	pending *utils.Gauge
-	active  *utils.Gauge
-	samples *utils.Gauge
+	errors        *utils.Gauge
+	pending       *utils.Gauge
+	active        *utils.Gauge
+	samples       *utils.Gauge
+	seriesFetched *utils.Gauge
 }
 
 func newAlertingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule) *AlertingRule {
@@ -121,6 +122,15 @@ func newAlertingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule
 			e := ar.state.getLast()
 			return float64(e.samples)
 		})
+	ar.metrics.seriesFetched = utils.GetOrCreateGauge(fmt.Sprintf(`vmalert_alerting_rules_last_evaluation_series_fetched{%s}`, labels),
+		func() float64 {
+			e := ar.state.getLast()
+			if e.seriesFetched == nil {
+				// means seriesFetched is unsupported
+				return -1
+			}
+			return float64(*e.seriesFetched)
+		})
 	return ar
 }
 
@@ -130,6 +140,7 @@ func (ar *AlertingRule) Close() {
 	ar.metrics.pending.Unregister()
 	ar.metrics.errors.Unregister()
 	ar.metrics.samples.Unregister()
+	ar.metrics.seriesFetched.Unregister()
 }
 
 // String implements Stringer interface
@@ -234,7 +245,7 @@ func (ar *AlertingRule) toLabels(m datasource.Metric, qFn templates.QueryFn) (*l
 // to get time series for backfilling.
 // It returns ALERT and ALERT_FOR_STATE time series as result.
 func (ar *AlertingRule) ExecRange(ctx context.Context, start, end time.Time) ([]prompbmarshal.TimeSeries, error) {
-	series, err := ar.q.QueryRange(ctx, ar.Expr, start, end)
+	res, err := ar.q.QueryRange(ctx, ar.Expr, start, end)
 	if err != nil {
 		return nil, err
 	}

@@ -242,7 +253,7 @@ func (ar *AlertingRule) ExecRange(ctx context.Context, start, end time.Time) ([]
 	qFn := func(query string) ([]datasource.Metric, error) {
 		return nil, fmt.Errorf("`query` template isn't supported in replay mode")
 	}
-	for _, s := range series {
+	for _, s := range res.Data {
 		a, err := ar.newAlert(s, nil, time.Time{}, qFn) // initial alert
 		if err != nil {
 			return nil, fmt.Errorf("failed to create alert: %s", err)
@@ -282,14 +293,15 @@ const resolvedRetention = 15 * time.Minute
 // Based on the Querier results AlertingRule maintains notifier.Alerts
 func (ar *AlertingRule) Exec(ctx context.Context, ts time.Time, limit int) ([]prompbmarshal.TimeSeries, error) {
 	start := time.Now()
-	qMetrics, req, err := ar.q.Query(ctx, ar.Expr, ts)
+	res, req, err := ar.q.Query(ctx, ar.Expr, ts)
 	curState := ruleStateEntry{
-		time:     start,
-		at:       ts,
-		duration: time.Since(start),
-		samples:  len(qMetrics),
-		err:      err,
-		curl:     requestToCurl(req),
+		time:          start,
+		at:            ts,
+		duration:      time.Since(start),
+		samples:       len(res.Data),
+		seriesFetched: res.SeriesFetched,
+		err:           err,
+		curl:          requestToCurl(req),
 	}
 
 	defer func() {
@@ -315,11 +327,11 @@ func (ar *AlertingRule) Exec(ctx context.Context, ts time.Time, limit int) ([]pr
 
 	qFn := func(query string) ([]datasource.Metric, error) {
 		res, _, err := ar.q.Query(ctx, query, ts)
-		return res, err
+		return res.Data, err
 	}
 	updated := make(map[uint64]struct{})
 	// update list of active alerts
-	for _, m := range qMetrics {
+	for _, m := range res.Data {
 		ls, err := ar.toLabels(m, qFn)
 		if err != nil {
 			curState.err = fmt.Errorf("failed to expand labels: %s", err)
@@ -485,22 +497,23 @@ func (ar *AlertingRule) AlertAPI(id uint64) *APIAlert {
 func (ar *AlertingRule) ToAPI() APIRule {
 	lastState := ar.state.getLast()
 	r := APIRule{
-		Type:           "alerting",
-		DatasourceType: ar.Type.String(),
-		Name:           ar.Name,
-		Query:          ar.Expr,
-		Duration:       ar.For.Seconds(),
-		Labels:         ar.Labels,
-		Annotations:    ar.Annotations,
-		LastEvaluation: lastState.time,
-		EvaluationTime: lastState.duration.Seconds(),
-		Health:         "ok",
-		State:          "inactive",
-		Alerts:         ar.AlertsToAPI(),
-		LastSamples:    lastState.samples,
-		MaxUpdates:     ar.state.size(),
-		Updates:        ar.state.getAll(),
-		Debug:          ar.Debug,
+		Type:              "alerting",
+		DatasourceType:    ar.Type.String(),
+		Name:              ar.Name,
+		Query:             ar.Expr,
+		Duration:          ar.For.Seconds(),
+		Labels:            ar.Labels,
+		Annotations:       ar.Annotations,
+		LastEvaluation:    lastState.time,
+		EvaluationTime:    lastState.duration.Seconds(),
+		Health:            "ok",
+		State:             "inactive",
+		Alerts:            ar.AlertsToAPI(),
+		LastSamples:       lastState.samples,
+		LastSeriesFetched: lastState.seriesFetched,
+		MaxUpdates:        ar.state.size(),
+		Updates:           ar.state.getAll(),
+		Debug:             ar.Debug,
 
 		// encode as strings to avoid rounding in JSON
 		ID: fmt.Sprintf("%d", ar.ID()),
@@ -637,11 +650,12 @@ func (ar *AlertingRule) Restore(ctx context.Context, q datasource.Querier, ts ti
 
 		ar.logDebugf(ts, nil, "restoring alert state via query %q", expr)
 
-		qMetrics, _, err := q.Query(ctx, expr, ts)
+		res, _, err := q.Query(ctx, expr, ts)
 		if err != nil {
 			return err
 		}
 
+		qMetrics := res.Data
 		if len(qMetrics) < 1 {
 			ar.logDebugf(ts, nil, "no response was received from restore query")
 			continue
@@ -36,7 +36,8 @@ type Group struct {
 	Params url.Values `yaml:"params"`
 	// Headers contains optional HTTP headers added to each rule request
 	Headers []Header `yaml:"headers,omitempty"`
-
+	// NotifierHeaders contains optional HTTP headers sent to notifiers for generated notifications
+	NotifierHeaders []Header `yaml:"notifier_headers,omitempty"`
 	// Catches all undefined fields and must be empty after parsing.
 	XXX map[string]interface{} `yaml:",inline"`
 }
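For reference, a `notifier_headers` entry such as `- "TenantID: foo"` ends up in the new field as a parsed key/value pair. A hedged sketch inside package `config` (the `Header` type with `Key`/`Value` fields is inferred from how `cfg.NotifierHeaders` is consumed later in this diff):

```go
// exampleGroup shows the parsed form of:
//
//	notifier_headers:
//	  - "TenantID: foo"
func exampleGroup() Group {
	return Group{
		Name:            "TestGroup",
		NotifierHeaders: []Header{{Key: "TenantID", Value: "foo"}},
	}
}
```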
@@ -1,6 +1,8 @@
 package config
 
 import (
+	"net/http"
+	"net/http/httptest"
 	"net/url"
 	"os"
 	"strings"
@@ -27,6 +29,40 @@ func TestParseGood(t *testing.T) {
 	}
 }
 
+func TestParseFromURL(t *testing.T) {
+	mux := http.NewServeMux()
+	mux.HandleFunc("/bad", func(w http.ResponseWriter, _ *http.Request) {
+		w.Write([]byte("foo bar"))
+	})
+	mux.HandleFunc("/good-alert", func(w http.ResponseWriter, _ *http.Request) {
+		w.Write([]byte(`
+groups:
+  - name: TestGroup
+    rules:
+      - alert: Conns
+        expr: vm_tcplistener_conns > 0`))
+	})
+	mux.HandleFunc("/good-rr", func(w http.ResponseWriter, _ *http.Request) {
+		w.Write([]byte(`
+groups:
+  - name: TestGroup
+    rules:
+      - record: conns
+        expr: max(vm_tcplistener_conns)`))
+	})
+
+	srv := httptest.NewServer(mux)
+	defer srv.Close()
+
+	if _, err := Parse([]string{srv.URL + "/good-alert", srv.URL + "/good-rr"}, notifier.ValidateTemplates, true); err != nil {
+		t.Errorf("error parsing URLs %s", err)
+	}
+
+	if _, err := Parse([]string{srv.URL + "/bad"}, notifier.ValidateTemplates, true); err == nil {
+		t.Errorf("expected parsing error: %s", err)
+	}
+}
+
 func TestParseBad(t *testing.T) {
 	testCases := []struct {
 		path []string
@@ -64,6 +100,10 @@ func TestParseBad(t *testing.T) {
 			[]string{"testdata/dir/rules6-bad.rules"},
 			"missing ':' in header",
 		},
+		{
+			[]string{"http://unreachable-url"},
+			"no such host",
+		},
 	}
 	for _, tc := range testCases {
 		_, err := Parse(tc.path, notifier.ValidateTemplates, true)
@@ -102,7 +142,8 @@ func TestGroup_Validate(t *testing.T) {
 			expErr: "group name must be set",
 		},
 		{
-			group: &Group{Name: "test",
+			group: &Group{
+				Name: "test",
 				Rules: []Rule{
 					{
 						Record: "record",

@@ -113,7 +154,8 @@ func TestGroup_Validate(t *testing.T) {
 			expErr: "",
 		},
 		{
-			group: &Group{Name: "test",
+			group: &Group{
+				Name: "test",
 				Rules: []Rule{
 					{
 						Record: "record",

@@ -125,7 +167,8 @@ func TestGroup_Validate(t *testing.T) {
 			validateExpressions: true,
 		},
 		{
-			group: &Group{Name: "test",
+			group: &Group{
+				Name: "test",
 				Rules: []Rule{
 					{
 						Alert: "alert",

@@ -139,7 +182,8 @@ func TestGroup_Validate(t *testing.T) {
 			expErr: "",
 		},
 		{
-			group: &Group{Name: "test",
+			group: &Group{
+				Name: "test",
 				Rules: []Rule{
 					{
 						Alert: "alert",

@@ -156,7 +200,8 @@ func TestGroup_Validate(t *testing.T) {
 			validateAnnotations: true,
 		},
 		{
-			group: &Group{Name: "test",
+			group: &Group{
+				Name: "test",
 				Rules: []Rule{
 					{
 						Alert: "alert",

@@ -171,7 +216,8 @@ func TestGroup_Validate(t *testing.T) {
 			expErr: "duplicate",
 		},
 		{
-			group: &Group{Name: "test",
+			group: &Group{
+				Name: "test",
 				Rules: []Rule{
 					{Alert: "alert", Expr: "up == 1", Labels: map[string]string{
 						"summary": "{{ value|query }}",

@@ -184,7 +230,8 @@ func TestGroup_Validate(t *testing.T) {
 			expErr: "duplicate",
 		},
 		{
-			group: &Group{Name: "test",
+			group: &Group{
+				Name: "test",
 				Rules: []Rule{
 					{Record: "record", Expr: "up == 1", Labels: map[string]string{
 						"summary": "{{ value|query }}",

@@ -197,7 +244,8 @@ func TestGroup_Validate(t *testing.T) {
 			expErr: "duplicate",
 		},
 		{
-			group: &Group{Name: "test",
+			group: &Group{
+				Name: "test",
 				Rules: []Rule{
 					{Alert: "alert", Expr: "up == 1", Labels: map[string]string{
 						"summary": "{{ value|query }}",

@@ -210,7+258,8 @@ func TestGroup_Validate(t *testing.T) {
 			expErr: "",
 		},
 		{
-			group: &Group{Name: "test",
+			group: &Group{
+				Name: "test",
 				Rules: []Rule{
 					{Record: "alert", Expr: "up == 1", Labels: map[string]string{
 						"summary": "{{ value|query }}",

@@ -223,7 +272,8 @@ func TestGroup_Validate(t *testing.T) {
 			expErr: "",
 		},
 		{
-			group: &Group{Name: "test thanos",
+			group: &Group{
+				Name: "test thanos",
 				Type: NewRawType("thanos"),
 				Rules: []Rule{
 					{Alert: "alert", Expr: "up == 1", Labels: map[string]string{

@@ -235,7 +285,8 @@ func TestGroup_Validate(t *testing.T) {
 			expErr: "unknown datasource type",
 		},
 		{
-			group: &Group{Name: "test graphite",
+			group: &Group{
+				Name: "test graphite",
 				Type: NewGraphiteType(),
 				Rules: []Rule{
 					{Alert: "alert", Expr: "up == 1", Labels: map[string]string{

@@ -247,7 +298,8 @@ func TestGroup_Validate(t *testing.T) {
 			expErr: "",
 		},
 		{
-			group: &Group{Name: "test prometheus",
+			group: &Group{
+				Name: "test prometheus",
 				Type: NewPrometheusType(),
 				Rules: []Rule{
 					{Alert: "alert", Expr: "up == 1", Labels: map[string]string{
@@ -538,6 +590,24 @@ rules:
 `)
 	})
 
+	t.Run("`notifier_headers` change", func(t *testing.T) {
+		f(t, `
+name: TestGroup
+notifier_headers:
+- "TenantID: foo"
+rules:
+- alert: foo
+  expr: sum by(job) (up == 1)
+`, `
+name: TestGroup
+notifier_headers:
+- "TenantID: bar"
+rules:
+- alert: foo
+  expr: sum by(job) (up == 1)
+`)
+	})
+
 	t.Run("`debug` change", func(t *testing.T) {
 		f(t, `
 name: TestGroup
@@ -7,6 +7,7 @@ import (
 	"time"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config/fslocal"
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config/fsurl"
 )
 
 // FS represents a file system abstraction for reading files.
@@ -32,17 +33,16 @@ var (
 )
 
 // readFromFS parses the given path list and inits FS for each item.
-// Once inited, readFromFS will try to read and return files from each FS.
+// Once initialized, readFromFS will try to read and return files from each FS.
 // readFromFS returns an error if at least one FS failed to init.
 // The function can be called multiple times but each unique path
-// will be inited only once.
+// will be initialized only once.
 //
 // It is allowed to mix different FS types in path list.
 func readFromFS(paths []string) (map[string][]byte, error) {
 	var err error
 	result := make(map[string][]byte)
 	for _, path := range paths {
-
 		fsRegistryMu.Lock()
 		fs, ok := fsRegistry[path]
 		if !ok {
@@ -89,8 +89,9 @@ func readFromFS(paths []string) (map[string][]byte, error) {
 
 // newFS creates FS based on the given path.
 // Supported file systems are: fs
-func newFS(path string) (FS, error) {
+func newFS(originPath string) (FS, error) {
 	scheme := "fs"
+	path := originPath
 	n := strings.Index(path, "://")
 	if n >= 0 {
 		scheme = path[:n]

@@ -102,6 +103,8 @@ func newFS(path string) (FS, error) {
 	switch scheme {
 	case "fs":
 		return &fslocal.FS{Pattern: path}, nil
+	case "http", "https":
+		return &fsurl.FS{Path: originPath}, nil
 	default:
 		return nil, fmt.Errorf("unsupported scheme %q", scheme)
 	}
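Taken together, the two hunks above give `newFS` a simple scheme dispatch. A hedged usage sketch inside package `config`, using only names defined in this diff:

```go
func exampleNewFS() {
	localFS, _ := newFS("dir/**/*.yaml")                 // no scheme defaults to "fs" -> fslocal.FS
	httpFS, _ := newFS("http://example.com/alerts.yaml") // "http"/"https" -> fsurl.FS
	_, err := newFS("ftp://example.com/alerts.yaml")     // any other scheme -> "unsupported scheme" error
	_, _, _ = localFS, httpFS, err
}
```

Note that `fsurl.FS` receives `originPath` with the scheme intact (hence the new variable), while the local case receives the pattern with any `fs://` prefix stripped (the stripping itself isn't shown in this excerpt).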
@@ -3,7 +3,8 @@ package fslocal
 import (
 	"fmt"
 	"os"
 	"path/filepath"
+
+	"github.com/bmatcuk/doublestar/v4"
 )
 
 // FS represents a local file system

@@ -16,7 +17,7 @@ type FS struct {
 
 // Init verifies that configured Pattern is correct
 func (fs *FS) Init() error {
-	_, err := filepath.Glob(fs.Pattern)
+	_, err := doublestar.FilepathGlob(fs.Pattern)
 	return err
 }
 

@@ -27,7 +28,7 @@ func (fs *FS) String() string {
 
 // List returns the list of file names which will be read via Read fn
 func (fs *FS) List() ([]string, error) {
-	matches, err := filepath.Glob(fs.Pattern)
+	matches, err := doublestar.FilepathGlob(fs.Pattern)
 	if err != nil {
 		return nil, fmt.Errorf("error while matching files via pattern %s: %w", fs.Pattern, err)
 	}
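The switch from `filepath.Glob` to `doublestar.FilepathGlob` is what makes the recursive `dir/**/*.yaml` patterns from the flag docs work; the stdlib glob treats `**` as a single path segment. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/bmatcuk/doublestar/v4"
)

func main() {
	// filepath.Glob("rules/**/*.yaml") would only look one level deep;
	// doublestar matches any number of nested directories.
	matches, err := doublestar.FilepathGlob("rules/**/*.yaml")
	if err != nil {
		panic(err)
	}
	fmt.Println(matches)
}
```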
app/vmalert/config/fsurl/url.go (new file, 57 lines)

@@ -0,0 +1,57 @@
+package fsurl
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+)
+
+// FS represents a struct which can read content from URL Path
+type FS struct {
+	// Path defines the URL to read the data from
+	Path string
+}
+
+// Init verifies that configured Path is correct
+func (fs *FS) Init() error {
+	_, err := url.Parse(fs.Path)
+	return err
+}
+
+// String implements Stringer interface
+func (fs *FS) String() string {
+	return fmt.Sprintf("URL {Path: %q}", fs.Path)
+}
+
+// List returns the list of file names which will be read via Read fn
+// List isn't supported by FS and reads from Path only
+func (fs *FS) List() ([]string, error) {
+	return []string{fs.Path}, nil
+}
+
+// Read returns a map of read files where
+// key is the file name and value is file's content.
+func (fs *FS) Read(files []string) (map[string][]byte, error) {
+	result := make(map[string][]byte)
+	for _, path := range files {
+		resp, err := http.Get(path)
+		if err != nil {
+			return nil, fmt.Errorf("failed to read from %q: %w", path, err)
+		}
+		data, err := io.ReadAll(resp.Body)
+		_ = resp.Body.Close()
+		if resp.StatusCode != http.StatusOK {
+			if len(data) > 4*1024 {
+				data = data[:4*1024]
+			}
+			return nil, fmt.Errorf("unexpected status code when fetching %q: %d, expecting %d; response: %q",
+				path, resp.StatusCode, http.StatusOK, data)
+		}
+		if err != nil {
+			return nil, fmt.Errorf("cannot read %q: %s", path, err)
+		}
+		result[path] = data
+	}
+	return result, nil
+}
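One subtlety in `Read` above: the body-read error is checked only after the status code, so a non-200 response can embed up to 4KB of the body in the error message even when reading it was cut short. A hedged usage sketch (runnable only inside the VictoriaMetrics repository, since the package is internal to it; the URL is a placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config/fsurl"
)

func main() {
	fs := &fsurl.FS{Path: "http://example.com/alerts.yaml"}
	if err := fs.Init(); err != nil {
		log.Fatalf("invalid URL: %s", err)
	}
	files, _ := fs.List()           // always []string{fs.Path}
	contents, err := fs.Read(files) // map[url]body; non-200 responses become errors
	if err != nil {
		log.Fatalf("fetch failed: %s", err)
	}
	fmt.Printf("read %d bytes of rules\n", len(contents[fs.Path]))
}
```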
@@ -5,6 +5,8 @@ groups:
     limit: 1000
     headers:
       - "MyHeader: foo"
+    notifier_headers:
+      - "MyHeader: foo"
     params:
       denyPartialResponse: ["true"]
     rules:
@@ -13,11 +13,22 @@ type Querier interface {
 	// It returns list of Metric in response, the http.Request used for sending query
 	// and error if any. Returned http.Request can't be reused and its body is already read.
 	// Query should stop once ctx is cancelled.
-	Query(ctx context.Context, query string, ts time.Time) ([]Metric, *http.Request, error)
+	Query(ctx context.Context, query string, ts time.Time) (Result, *http.Request, error)
 	// QueryRange executes range request with the given query on the given time range.
 	// It returns list of Metric in response and error if any.
 	// QueryRange should stop once ctx is cancelled.
-	QueryRange(ctx context.Context, query string, from, to time.Time) ([]Metric, error)
+	QueryRange(ctx context.Context, query string, from, to time.Time) (Result, error)
 }
 
+// Result represents expected response from the datasource
+type Result struct {
+	// Data contains list of received Metric
+	Data []Metric
+	// SeriesFetched contains amount of time series processed by datasource
+	// during query evaluation.
+	// If nil, then this feature is not supported by the datasource.
+	// SeriesFetched is supported by VictoriaMetrics since v1.90.
+	SeriesFetched *int
+}
+
 // QuerierBuilder builds Querier with given params.
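A hedged sketch of what a `Querier` caller looks like after this change (inside package `datasource`; it mirrors the call sites in `alerting.go` earlier in this diff):

```go
func checkExpr(ctx context.Context, q Querier, expr string) error {
	res, _, err := q.Query(ctx, expr, time.Now())
	if err != nil {
		return err
	}
	for _, m := range res.Data { // metrics now live under res.Data
		_ = m
	}
	if res.SeriesFetched != nil && *res.SeriesFetched == 0 {
		// the expression matched no series before filtering;
		// datasources without the stat (VictoriaMetrics < v1.90) leave it nil
	}
	return nil
}
```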
@@ -2,6 +2,7 @@ package datasource
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -98,10 +99,10 @@ func NewVMStorage(baseURL string, authCfg *promauth.Config, lookBack time.Durati
 }
 
 // Query executes the given query and returns parsed response
-func (s *VMStorage) Query(ctx context.Context, query string, ts time.Time) ([]Metric, *http.Request, error) {
+func (s *VMStorage) Query(ctx context.Context, query string, ts time.Time) (Result, *http.Request, error) {
 	req, err := s.newRequestPOST()
 	if err != nil {
-		return nil, nil, err
+		return Result{}, nil, err
 	}
 
 	switch s.dataSourceType {

@@ -110,12 +111,12 @@ func (s *VMStorage) Query(ctx context.Context, query string, ts time.Time) ([]Me
 	case datasourceGraphite:
 		s.setGraphiteReqParams(req, query, ts)
 	default:
-		return nil, nil, fmt.Errorf("engine not found: %q", s.dataSourceType)
+		return Result{}, nil, fmt.Errorf("engine not found: %q", s.dataSourceType)
 	}
 
 	resp, err := s.do(ctx, req)
 	if err != nil {
-		return nil, req, err
+		return Result{}, req, err
 	}
 	defer func() {
 		_ = resp.Body.Close()
@@ -132,24 +133,24 @@ func (s *VMStorage) Query(ctx context.Context, query string, ts time.Time) ([]Me
 // QueryRange executes the given query on the given time range.
 // For Prometheus type see https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
 // Graphite type isn't supported.
-func (s *VMStorage) QueryRange(ctx context.Context, query string, start, end time.Time) ([]Metric, error) {
+func (s *VMStorage) QueryRange(ctx context.Context, query string, start, end time.Time) (res Result, err error) {
 	if s.dataSourceType != datasourcePrometheus {
-		return nil, fmt.Errorf("%q is not supported for QueryRange", s.dataSourceType)
+		return res, fmt.Errorf("%q is not supported for QueryRange", s.dataSourceType)
 	}
 	req, err := s.newRequestPOST()
 	if err != nil {
-		return nil, err
+		return res, err
 	}
 	if start.IsZero() {
-		return nil, fmt.Errorf("start param is missing")
+		return res, fmt.Errorf("start param is missing")
 	}
 	if end.IsZero() {
-		return nil, fmt.Errorf("end param is missing")
+		return res, fmt.Errorf("end param is missing")
 	}
 	s.setPrometheusRangeReqParams(req, query, start, end)
 	resp, err := s.do(ctx, req)
 	if err != nil {
-		return nil, err
+		return res, err
 	}
 	defer func() {
 		_ = resp.Body.Close()
@@ -162,6 +163,11 @@ func (s *VMStorage) do(ctx context.Context, req *http.Request) (*http.Response,
 		logger.Infof("DEBUG datasource request: executing %s request with params %q", req.Method, req.URL.RawQuery)
 	}
 	resp, err := s.c.Do(req.WithContext(ctx))
+	if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
+		// something in the middle between client and datasource might be closing
+		// the connection. So we do a one more attempt in hope request will succeed.
+		resp, err = s.c.Do(req.WithContext(ctx))
+	}
 	if err != nil {
 		return nil, fmt.Errorf("error getting response from %s: %w", req.URL.Redacted(), err)
 	}
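One detail about the retry above: re-issuing the same `*http.Request` is safe here only because these requests carry no body; the query parameters are encoded in the URL (note the `req.URL.RawQuery` debug log). The pattern, extracted as a hedged standalone sketch:

```go
// doWithRetryOnEOF retries once when the connection appears to have been
// closed mid-flight by an intermediary. It assumes req has no body, so the
// request can be re-sent as-is.
func doWithRetryOnEOF(c *http.Client, req *http.Request) (*http.Response, error) {
	resp, err := c.Do(req)
	if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
		resp, err = c.Do(req)
	}
	return resp, err
}
```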
@@ -35,12 +35,12 @@ func (r graphiteResponse) metrics() []Metric {
 	return ms
 }
 
-func parseGraphiteResponse(req *http.Request, resp *http.Response) ([]Metric, error) {
+func parseGraphiteResponse(req *http.Request, resp *http.Response) (Result, error) {
 	r := &graphiteResponse{}
 	if err := json.NewDecoder(resp.Body).Decode(r); err != nil {
-		return nil, fmt.Errorf("error parsing graphite metrics for %s: %w", req.URL.Redacted(), err)
+		return Result{}, fmt.Errorf("error parsing graphite metrics for %s: %w", req.URL.Redacted(), err)
 	}
-	return r.metrics(), nil
+	return Result{Data: r.metrics()}, nil
 }
 
 const (
@@ -22,6 +22,10 @@ type promResponse struct {
 		ResultType string          `json:"resultType"`
 		Result     json.RawMessage `json:"result"`
 	} `json:"data"`
+	// Stats supported by VictoriaMetrics since v1.90
+	Stats struct {
+		SeriesFetched *string `json:"seriesFetched,omitempty"`
+	} `json:"stats,omitempty"`
 }
 
 type promInstant struct {
@@ -96,39 +100,54 @@ const (
 	rtVector, rtMatrix, rScalar = "vector", "matrix", "scalar"
 )
 
-func parsePrometheusResponse(req *http.Request, resp *http.Response) ([]Metric, error) {
+func parsePrometheusResponse(req *http.Request, resp *http.Response) (res Result, err error) {
 	r := &promResponse{}
-	if err := json.NewDecoder(resp.Body).Decode(r); err != nil {
-		return nil, fmt.Errorf("error parsing prometheus metrics for %s: %w", req.URL.Redacted(), err)
+	if err = json.NewDecoder(resp.Body).Decode(r); err != nil {
+		return res, fmt.Errorf("error parsing prometheus metrics for %s: %w", req.URL.Redacted(), err)
 	}
 	if r.Status == statusError {
-		return nil, fmt.Errorf("response error, query: %s, errorType: %s, error: %s", req.URL.Redacted(), r.ErrorType, r.Error)
+		return res, fmt.Errorf("response error, query: %s, errorType: %s, error: %s", req.URL.Redacted(), r.ErrorType, r.Error)
 	}
 	if r.Status != statusSuccess {
-		return nil, fmt.Errorf("unknown status: %s, Expected success or error ", r.Status)
+		return res, fmt.Errorf("unknown status: %s, Expected success or error ", r.Status)
 	}
+	var parseFn func() ([]Metric, error)
 	switch r.Data.ResultType {
 	case rtVector:
 		var pi promInstant
 		if err := json.Unmarshal(r.Data.Result, &pi.Result); err != nil {
-			return nil, fmt.Errorf("umarshal err %s; \n %#v", err, string(r.Data.Result))
+			return res, fmt.Errorf("umarshal err %s; \n %#v", err, string(r.Data.Result))
 		}
-		return pi.metrics()
+		parseFn = pi.metrics
 	case rtMatrix:
 		var pr promRange
 		if err := json.Unmarshal(r.Data.Result, &pr.Result); err != nil {
-			return nil, err
+			return res, err
 		}
-		return pr.metrics()
+		parseFn = pr.metrics
 	case rScalar:
 		var ps promScalar
 		if err := json.Unmarshal(r.Data.Result, &ps); err != nil {
-			return nil, err
+			return res, err
 		}
-		return ps.metrics()
+		parseFn = ps.metrics
 	default:
-		return nil, fmt.Errorf("unknown result type %q", r.Data.ResultType)
+		return res, fmt.Errorf("unknown result type %q", r.Data.ResultType)
 	}
+
+	ms, err := parseFn()
+	if err != nil {
+		return res, err
+	}
+	res = Result{Data: ms}
+	if r.Stats.SeriesFetched != nil {
+		intV, err := strconv.Atoi(*r.Stats.SeriesFetched)
+		if err != nil {
+			return res, fmt.Errorf("failed to convert stats.seriesFetched to int: %w", err)
+		}
+		res.SeriesFetched = &intV
+	}
+	return res, nil
 }
 
 func (s *VMStorage) setPrometheusInstantReqParams(r *http.Request, query string, timestamp time.Time) {
@@ -35,13 +35,6 @@ func TestVMInstantQuery(t *testing.T) {
 		t.Errorf("should not be called")
 	})
 	c := -1
-	mux.HandleFunc("/render", func(w http.ResponseWriter, request *http.Request) {
-		c++
-		switch c {
-		case 8:
-			w.Write([]byte(`[{"target":"constantLine(10)","tags":{"name":"constantLine(10)"},"datapoints":[[10,1611758343],[10,1611758373],[10,1611758403]]}]`))
-		}
-	})
 	mux.HandleFunc("/api/v1/query", func(w http.ResponseWriter, r *http.Request) {
 		c++
 		if r.Method != http.MethodPost {
@@ -62,22 +55,28 @@ func TestVMInstantQuery(t *testing.T) {
 		}
 		switch c {
 		case 0:
-			conn, _, _ := w.(http.Hijacker).Hijack()
-			_ = conn.Close()
-		case 1:
 			w.WriteHeader(500)
-		case 2:
+		case 1:
 			w.Write([]byte("[]"))
-		case 3:
+		case 2:
 			w.Write([]byte(`{"status":"error", "errorType":"type:", "error":"some error msg"}`))
-		case 4:
+		case 3:
 			w.Write([]byte(`{"status":"unknown"}`))
-		case 5:
+		case 4:
 			w.Write([]byte(`{"status":"success","data":{"resultType":"matrix"}}`))
-		case 6:
+		case 5:
 			w.Write([]byte(`{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"vm_rows","foo":"bar"},"value":[1583786142,"13763"]},{"metric":{"__name__":"vm_requests","foo":"baz"},"value":[1583786140,"2000"]}]}}`))
-		case 7:
+		case 6:
 			w.Write([]byte(`{"status":"success","data":{"resultType":"scalar","result":[1583786142, "1"]}}`))
+		case 7:
+			w.Write([]byte(`{"status":"success","data":{"resultType":"scalar","result":[1583786142, "1"]},"stats":{"seriesFetched": "42"}}`))
 		}
 	})
+	mux.HandleFunc("/render", func(w http.ResponseWriter, request *http.Request) {
+		c++
+		switch c {
+		case 8:
+			w.Write([]byte(`[{"target":"constantLine(10)","tags":{"name":"constantLine(10)"},"datapoints":[[10,1611758343],[10,1611758373],[10,1611758403]]}]`))
+		}
+	})
@@ -95,24 +94,27 @@ func TestVMInstantQuery(t *testing.T) {
 	ts := time.Now()
 
 	expErr := func(err string) {
-		if _, _, err := pq.Query(ctx, query, ts); err == nil {
+		_, _, gotErr := pq.Query(ctx, query, ts)
+		if gotErr == nil {
 			t.Fatalf("expected %q got nil", err)
 		}
+		if !strings.Contains(gotErr.Error(), err) {
+			t.Fatalf("expected err %q; got %q", err, gotErr)
+		}
 	}
 
-	expErr("connection error")              // 0
-	expErr("invalid response status error") // 1
-	expErr("response body error")           // 2
-	expErr("error status")                  // 3
-	expErr("unknown status")                // 4
-	expErr("non-vector resultType error")   // 5
+	expErr("500")                              // 0
+	expErr("error parsing prometheus metrics") // 1
+	expErr("response error")                   // 2
+	expErr("unknown status")                   // 3
+	expErr("unexpected end of JSON input")     // 4
 
-	m, _, err := pq.Query(ctx, query, ts) // 6 - vector
+	res, _, err := pq.Query(ctx, query, ts) // 5 - vector
 	if err != nil {
 		t.Fatalf("unexpected %s", err)
 	}
-	if len(m) != 2 {
-		t.Fatalf("expected 2 metrics got %d in %+v", len(m), m)
+	if len(res.Data) != 2 {
+		t.Fatalf("expected 2 metrics got %d in %+v", len(res.Data), res.Data)
 	}
 	expected := []Metric{
 		{
@@ -126,17 +128,17 @@ func TestVMInstantQuery(t *testing.T) {
 			Values:     []float64{2000},
 		},
 	}
-	metricsEqual(t, m, expected)
+	metricsEqual(t, res.Data, expected)
 
-	m, req, err := pq.Query(ctx, query, ts) // 7 - scalar
+	res, req, err := pq.Query(ctx, query, ts) // 6 - scalar
 	if err != nil {
 		t.Fatalf("unexpected %s", err)
 	}
 	if req == nil {
 		t.Fatalf("expected request to be non-nil")
 	}
-	if len(m) != 1 {
-		t.Fatalf("expected 1 metrics got %d in %+v", len(m), m)
+	if len(res.Data) != 1 {
+		t.Fatalf("expected 1 metrics got %d in %+v", len(res.Data), res.Data)
 	}
 	expected = []Metric{
 		{

@@ -144,18 +146,44 @@ func TestVMInstantQuery(t *testing.T) {
 			Values:     []float64{1},
 		},
 	}
-	if !reflect.DeepEqual(m, expected) {
-		t.Fatalf("unexpected metric %+v want %+v", m, expected)
+	if !reflect.DeepEqual(res.Data, expected) {
+		t.Fatalf("unexpected metric %+v want %+v", res.Data, expected)
 	}
 
+	if res.SeriesFetched != nil {
+		t.Fatalf("expected `seriesFetched` field to be nil when it is missing in datasource response; got %v instead",
+			res.SeriesFetched)
+	}
+
+	res, _, err = pq.Query(ctx, query, ts) // 7 - scalar with stats
+	if err != nil {
+		t.Fatalf("unexpected %s", err)
+	}
+	if len(res.Data) != 1 {
+		t.Fatalf("expected 1 metrics got %d in %+v", len(res.Data), res)
+	}
+	expected = []Metric{
+		{
+			Timestamps: []int64{1583786142},
+			Values:     []float64{1},
+		},
+	}
+	if !reflect.DeepEqual(res.Data, expected) {
+		t.Fatalf("unexpected metric %+v want %+v", res.Data, expected)
+	}
+	if *res.SeriesFetched != 42 {
+		t.Fatalf("expected `seriesFetched` field to be 42; got %d instead",
+			*res.SeriesFetched)
+	}
+
 	gq := s.BuildWithParams(QuerierParams{DataSourceType: string(datasourceGraphite)})
 
-	m, _, err = gq.Query(ctx, queryRender, ts) // 8 - graphite
+	res, _, err = gq.Query(ctx, queryRender, ts) // 8 - graphite
 	if err != nil {
 		t.Fatalf("unexpected %s", err)
 	}
-	if len(m) != 1 {
-		t.Fatalf("expected 1 metric got %d in %+v", len(m), m)
+	if len(res.Data) != 1 {
+		t.Fatalf("expected 1 metric got %d in %+v", len(res.Data), res.Data)
 	}
 	exp := []Metric{
 		{
|
|||
Values: []float64{10},
|
||||
},
|
||||
}
|
||||
metricsEqual(t, m, exp)
|
||||
metricsEqual(t, res.Data, exp)
|
||||
|
||||
}
|
||||
|
||||
func TestVMInstantQueryWithRetry(t *testing.T) {
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/", func(_ http.ResponseWriter, _ *http.Request) {
|
||||
t.Errorf("should not be called")
|
||||
})
|
||||
c := -1
|
||||
mux.HandleFunc("/api/v1/query", func(w http.ResponseWriter, r *http.Request) {
|
||||
c++
|
||||
if r.URL.Query().Get("query") != query {
|
||||
t.Errorf("expected %s in query param, got %s", query, r.URL.Query().Get("query"))
|
||||
}
|
||||
switch c {
|
||||
case 0:
|
||||
w.Write([]byte(`{"status":"success","data":{"resultType":"scalar","result":[1583786142, "1"]}}`))
|
||||
case 1:
|
||||
conn, _, _ := w.(http.Hijacker).Hijack()
|
||||
_ = conn.Close()
|
||||
case 2:
|
||||
w.Write([]byte(`{"status":"success","data":{"resultType":"scalar","result":[1583786142, "2"]}}`))
|
||||
case 3:
|
||||
conn, _, _ := w.(http.Hijacker).Hijack()
|
||||
_ = conn.Close()
|
||||
case 4:
|
||||
conn, _, _ := w.(http.Hijacker).Hijack()
|
||||
_ = conn.Close()
|
||||
}
|
||||
})
|
||||
|
||||
srv := httptest.NewServer(mux)
|
||||
defer srv.Close()
|
||||
|
||||
s := NewVMStorage(srv.URL, nil, time.Minute, 0, false, srv.Client())
|
||||
pq := s.BuildWithParams(QuerierParams{DataSourceType: string(datasourcePrometheus)})
|
||||
|
||||
expErr := func(err string) {
|
||||
_, _, gotErr := pq.Query(ctx, query, time.Now())
|
||||
if gotErr == nil {
|
||||
t.Fatalf("expected %q got nil", err)
|
||||
}
|
||||
if !strings.Contains(gotErr.Error(), err) {
|
||||
t.Fatalf("expected err %q; got %q", err, gotErr)
|
||||
}
|
||||
}
|
||||
|
||||
expValue := func(v float64) {
|
||||
res, _, err := pq.Query(ctx, query, time.Now())
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected %s", err)
|
||||
}
|
||||
m := res.Data
|
||||
if len(m) != 1 {
|
||||
t.Fatalf("expected 1 metrics got %d in %+v", len(m), m)
|
||||
}
|
||||
expected := []Metric{
|
||||
{
|
||||
Timestamps: []int64{1583786142},
|
||||
Values: []float64{v},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(m, expected) {
|
||||
t.Fatalf("unexpected metric %+v want %+v", m, expected)
|
||||
}
|
||||
}
|
||||
|
||||
expValue(1) // 0
|
||||
expValue(2) // 1 - fail, 2 - retry
|
||||
expErr("EOF") // 3, 4 - retries
|
||||
}
|
||||
|
||||
func metricsEqual(t *testing.T, gotM, expectedM []Metric) {
|
||||
|
@@ -250,10 +348,11 @@ func TestVMRangeQuery(t *testing.T) {
 
 	start, end := time.Now().Add(-time.Minute), time.Now()
 
-	m, err := pq.QueryRange(ctx, query, start, end)
+	res, err := pq.QueryRange(ctx, query, start, end)
 	if err != nil {
 		t.Fatalf("unexpected %s", err)
 	}
+	m := res.Data
 	if len(m) != 1 {
 		t.Fatalf("expected 1 metric got %d in %+v", len(m), m)
 	}
@@ -36,9 +36,10 @@ type Group struct {
 	Checksum       string
 	LastEvaluation time.Time
 
-	Labels  map[string]string
-	Params  url.Values
-	Headers map[string]string
+	Labels          map[string]string
+	Params          url.Values
+	Headers         map[string]string
+	NotifierHeaders map[string]string
 
 	doneCh     chan struct{}
 	finishedCh chan struct{}
@@ -93,16 +94,17 @@ func mergeLabels(groupName, ruleName string, set1, set2 map[string]string) map[s
 
 func newGroup(cfg config.Group, qb datasource.QuerierBuilder, defaultInterval time.Duration, labels map[string]string) *Group {
 	g := &Group{
-		Type:        cfg.Type,
-		Name:        cfg.Name,
-		File:        cfg.File,
-		Interval:    cfg.Interval.Duration(),
-		Limit:       cfg.Limit,
-		Concurrency: cfg.Concurrency,
-		Checksum:    cfg.Checksum,
-		Params:      cfg.Params,
-		Headers:     make(map[string]string),
-		Labels:      cfg.Labels,
+		Type:            cfg.Type,
+		Name:            cfg.Name,
+		File:            cfg.File,
+		Interval:        cfg.Interval.Duration(),
+		Limit:           cfg.Limit,
+		Concurrency:     cfg.Concurrency,
+		Checksum:        cfg.Checksum,
+		Params:          cfg.Params,
+		Headers:         make(map[string]string),
+		NotifierHeaders: make(map[string]string),
+		Labels:          cfg.Labels,
 
 		doneCh:     make(chan struct{}),
 		finishedCh: make(chan struct{}),
@@ -117,6 +119,9 @@ func newGroup(cfg config.Group, qb datasource.QuerierBuilder, defaultInterval ti
 	for _, h := range cfg.Headers {
 		g.Headers[h.Key] = h.Value
 	}
+	for _, h := range cfg.NotifierHeaders {
+		g.NotifierHeaders[h.Key] = h.Value
+	}
 	g.metrics = newGroupMetrics(g)
 	rules := make([]Rule, len(cfg.Rules))
 	for i, r := range cfg.Rules {
@@ -230,6 +235,7 @@ func (g *Group) updateWith(newGroup *Group) error {
 	g.Concurrency = newGroup.Concurrency
 	g.Params = newGroup.Params
 	g.Headers = newGroup.Headers
+	g.NotifierHeaders = newGroup.NotifierHeaders
 	g.Labels = newGroup.Labels
 	g.Limit = newGroup.Limit
 	g.Checksum = newGroup.Checksum
@@ -294,7 +300,9 @@ func (g *Group) start(ctx context.Context, nts func() []notifier.Notifier, rw *r
 	e := &executor{
 		rw:        rw,
 		notifiers: nts,
-		previouslySentSeriesToRW: make(map[uint64]map[string][]prompbmarshal.Label)}
+		notifierHeaders:          g.NotifierHeaders,
+		previouslySentSeriesToRW: make(map[uint64]map[string][]prompbmarshal.Label),
+	}
 
 	evalTS := time.Now()
 
@@ -370,6 +378,8 @@ func (g *Group) start(ctx context.Context, nts func() []notifier.Notifier, rw *r
 			// ensure that staleness is tracked or existing rules only
 			e.purgeStaleSeries(g.Rules)
 
+			e.notifierHeaders = g.NotifierHeaders
+
 			if g.Interval != ng.Interval {
 				g.Interval = ng.Interval
 				t.Stop()
@@ -403,8 +413,10 @@ func getResolveDuration(groupInterval, delta, maxDuration time.Duration) time.Du
 }
 
 type executor struct {
-	notifiers func() []notifier.Notifier
-	rw        *remotewrite.Client
+	notifiers       func() []notifier.Notifier
+	notifierHeaders map[string]string
+
+	rw *remotewrite.Client
 
 	previouslySentSeriesToRWMu sync.Mutex
 	// previouslySentSeriesToRW stores series sent to RW on previous iteration
@@ -504,7 +516,7 @@ func (e *executor) exec(ctx context.Context, rule Rule, ts time.Time, resolveDur
 	for _, nt := range e.notifiers() {
 		wg.Add(1)
 		go func(nt notifier.Notifier) {
-			if err := nt.Send(ctx, alerts); err != nil {
+			if err := nt.Send(ctx, alerts, e.notifierHeaders); err != nil {
 				errGr.Add(fmt.Errorf("rule %q: failed to send alerts to addr %q: %w", rule, nt.Addr(), err))
 			}
 			wg.Done()
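The `Send` signature change ripples through every `notifier.Notifier` implementation, as the test doubles below show. The interface definition itself isn't part of this excerpt; inferred from the call sites, it now looks roughly like:

```go
// Inferred from the call sites in this diff - not copied from the source:
type Notifier interface {
	// Send sends the given alerts, attaching the group-level
	// notifier_headers to the outgoing notification requests.
	Send(ctx context.Context, alerts []Alert, notifierHeaders map[string]string) error
	Addr() string
	Close()
}
```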
@@ -44,21 +44,21 @@ func (fq *fakeQuerier) BuildWithParams(_ datasource.QuerierParams) datasource.Qu
 	return fq
 }
 
-func (fq *fakeQuerier) QueryRange(ctx context.Context, q string, _, _ time.Time) ([]datasource.Metric, error) {
+func (fq *fakeQuerier) QueryRange(ctx context.Context, q string, _, _ time.Time) (datasource.Result, error) {
 	req, _, err := fq.Query(ctx, q, time.Now())
 	return req, err
 }
 
-func (fq *fakeQuerier) Query(_ context.Context, _ string, _ time.Time) ([]datasource.Metric, *http.Request, error) {
+func (fq *fakeQuerier) Query(_ context.Context, _ string, _ time.Time) (datasource.Result, *http.Request, error) {
 	fq.Lock()
 	defer fq.Unlock()
 	if fq.err != nil {
-		return nil, nil, fq.err
+		return datasource.Result{}, nil, fq.err
 	}
 	cp := make([]datasource.Metric, len(fq.metrics))
 	copy(cp, fq.metrics)
 	req, _ := http.NewRequest(http.MethodPost, "foo.com", nil)
-	return cp, req, nil
+	return datasource.Result{Data: cp}, req, nil
 }
 
 type fakeQuerierWithRegistry struct {
@@ -85,23 +85,23 @@ func (fqr *fakeQuerierWithRegistry) BuildWithParams(_ datasource.QuerierParams)
 	return fqr
 }
 
-func (fqr *fakeQuerierWithRegistry) QueryRange(ctx context.Context, q string, _, _ time.Time) ([]datasource.Metric, error) {
+func (fqr *fakeQuerierWithRegistry) QueryRange(ctx context.Context, q string, _, _ time.Time) (datasource.Result, error) {
 	req, _, err := fqr.Query(ctx, q, time.Now())
 	return req, err
 }
 
-func (fqr *fakeQuerierWithRegistry) Query(_ context.Context, expr string, _ time.Time) ([]datasource.Metric, *http.Request, error) {
+func (fqr *fakeQuerierWithRegistry) Query(_ context.Context, expr string, _ time.Time) (datasource.Result, *http.Request, error) {
 	fqr.Lock()
 	defer fqr.Unlock()
 
 	req, _ := http.NewRequest(http.MethodPost, "foo.com", nil)
 	metrics, ok := fqr.registry[expr]
 	if !ok {
-		return nil, req, nil
+		return datasource.Result{}, req, nil
 	}
 	cp := make([]datasource.Metric, len(metrics))
 	copy(cp, metrics)
-	return cp, req, nil
+	return datasource.Result{Data: cp}, req, nil
 }
 
 type fakeQuerierWithDelay struct {
@ -109,7 +109,7 @@ type fakeQuerierWithDelay struct {
|
|||
delay time.Duration
|
||||
}
|
||||
|
||||
func (fqd *fakeQuerierWithDelay) Query(ctx context.Context, expr string, ts time.Time) ([]datasource.Metric, *http.Request, error) {
|
||||
func (fqd *fakeQuerierWithDelay) Query(ctx context.Context, expr string, ts time.Time) (datasource.Result, *http.Request, error) {
|
||||
timer := time.NewTimer(fqd.delay)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
|
@ -131,7 +131,7 @@ type fakeNotifier struct {
|
|||
|
||||
func (*fakeNotifier) Close() {}
|
||||
func (*fakeNotifier) Addr() string { return "" }
|
||||
func (fn *fakeNotifier) Send(_ context.Context, alerts []notifier.Alert) error {
|
||||
func (fn *fakeNotifier) Send(_ context.Context, alerts []notifier.Alert, _ map[string]string) error {
|
||||
fn.Lock()
|
||||
defer fn.Unlock()
|
||||
fn.counter += len(alerts)
|
||||
|
@ -155,7 +155,7 @@ type faultyNotifier struct {
|
|||
fakeNotifier
|
||||
}
|
||||
|
||||
func (fn *faultyNotifier) Send(ctx context.Context, _ []notifier.Alert) error {
|
||||
func (fn *faultyNotifier) Send(ctx context.Context, _ []notifier.Alert, _ map[string]string) error {
|
||||
d, ok := ctx.Deadline()
|
||||
if ok {
|
||||
time.Sleep(time.Until(d))
|
||||
|
|
|
@@ -10,6 +10,8 @@ import (
	"strings"
	"time"

	"github.com/VictoriaMetrics/metrics"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"

@@ -24,15 +26,16 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics"
	"github.com/VictoriaMetrics/metrics"
)

var (
	rulePath = flagutil.NewArrayString("rule", `Path to the files with alerting and/or recording rules.
	rulePath = flagutil.NewArrayString("rule", `Path to the files or http url with alerting and/or recording rules.
Supports hierarchical patterns and regexps.
Examples:
 -rule="/path/to/file". Path to a single file with alerting rules
 -rule="/path/to/file". Path to a single file with alerting rules.
 -rule="http://<some-server-addr>/path/to/rules". HTTP URL to a page with alerting rules.
 -rule="dir/*.yaml" -rule="/*.yaml" -rule="gcs://vmalert-rules/tenant_%{TENANT_ID}/prod".
 -rule="dir/**/*.yaml". Includes all the .yaml files in "dir" subfolders recursively.
Rule files may contain %{ENV_VAR} placeholders, which are substituted by the corresponding env vars.

Enterprise version of vmalert supports S3 and GCS paths to rules.

@@ -47,7 +50,9 @@ See https://docs.victoriametrics.com/vmalert.html#reading-rules-from-object-stor
Examples:
 -rule.templates="/path/to/file". Path to a single file with go templates
 -rule.templates="dir/*.tpl" -rule.templates="/*.tpl". Relative path to all .tpl files in "dir" folder,
absolute path to all .tpl files in root.`)
absolute path to all .tpl files in root.
 -rule.templates="dir/**/*.tpl". Includes all the .tpl files in "dir" subfolders recursively.
`)

	rulesCheckInterval = flag.Duration("rule.configCheckInterval", 0, "Interval for checking for changes in '-rule' files. "+
		"By default the checking is disabled. Send SIGHUP signal in order to force config check for changes. DEPRECATED - see '-configCheckInterval' instead")

@@ -282,7 +287,10 @@ func getAlertURLGenerator(externalURL *url.URL, externalAlertSource string, vali
		"tpl": externalAlertSource,
	}
	return func(alert notifier.Alert) string {
		templated, err := alert.ExecTemplate(nil, alert.Labels, m)
		qFn := func(query string) ([]datasource.Metric, error) {
			return nil, fmt.Errorf("`query` template isn't supported for alert source template")
		}
		templated, err := alert.ExecTemplate(qFn, alert.Labels, m)
		if err != nil {
			logger.Errorf("can not exec source template %s", err)
		}
@@ -176,15 +176,17 @@ func (g *Group) toAPI() APIGroup {
		// encode as string to avoid rounding
		ID: fmt.Sprintf("%d", g.ID()),

		Name: g.Name,
		Type: g.Type.String(),
		File: g.File,
		Interval: g.Interval.Seconds(),
		LastEvaluation: g.LastEvaluation,
		Concurrency: g.Concurrency,
		Params: urlValuesToStrings(g.Params),
		Headers: headersToStrings(g.Headers),
		Labels: g.Labels,
		Name: g.Name,
		Type: g.Type.String(),
		File: g.File,
		Interval: g.Interval.Seconds(),
		LastEvaluation: g.LastEvaluation,
		Concurrency: g.Concurrency,
		Params: urlValuesToStrings(g.Params),
		Headers: headersToStrings(g.Headers),
		NotifierHeaders: headersToStrings(g.NotifierHeaders),

		Labels: g.Labels,
	}
	for _, r := range g.Rules {
		ag.Rules = append(ag.Rules, r.ToAPI())
@@ -51,16 +51,16 @@ func (am *AlertManager) Close() {
func (am AlertManager) Addr() string { return am.addr }

// Send an alert or resolve message
func (am *AlertManager) Send(ctx context.Context, alerts []Alert) error {
func (am *AlertManager) Send(ctx context.Context, alerts []Alert, headers map[string]string) error {
	am.metrics.alertsSent.Add(len(alerts))
	err := am.send(ctx, alerts)
	err := am.send(ctx, alerts, headers)
	if err != nil {
		am.metrics.alertsSendErrors.Add(len(alerts))
	}
	return err
}

func (am *AlertManager) send(ctx context.Context, alerts []Alert) error {
func (am *AlertManager) send(ctx context.Context, alerts []Alert, headers map[string]string) error {
	b := &bytes.Buffer{}
	writeamRequest(b, alerts, am.argFunc, am.relabelConfigs)

@@ -69,6 +69,9 @@ func (am *AlertManager) send(ctx context.Context, alerts []Alert) error {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	for key, value := range headers {
		req.Header.Set(key, value)
	}

	if am.timeout > 0 {
		var cancel context.CancelFunc

@@ -105,7 +108,8 @@ const alertManagerPath = "/api/v2/alerts"

// NewAlertManager is a constructor for AlertManager
func NewAlertManager(alertManagerURL string, fn AlertURLGenerator, authCfg promauth.HTTPClientConfig,
	relabelCfg *promrelabel.ParsedConfigs, timeout time.Duration) (*AlertManager, error) {
	relabelCfg *promrelabel.ParsedConfigs, timeout time.Duration,
) (*AlertManager, error) {
	tls := &promauth.TLSConfig{}
	if authCfg.TLSConfig != nil {
		tls = authCfg.TLSConfig
@@ -25,6 +25,7 @@ func TestAlertManager_Addr(t *testing.T) {

func TestAlertManager_Send(t *testing.T) {
	const baUser, baPass = "foo", "bar"
	const headerKey, headerValue = "TenantID", "foo"
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(_ http.ResponseWriter, _ *http.Request) {
		t.Errorf("should not be called")

@@ -73,6 +74,11 @@ func TestAlertManager_Send(t *testing.T) {
			if a[0].EndAt.IsZero() {
				t.Errorf("expected non-zero end time")
			}
		case 3:
			if r.Header.Get(headerKey) != headerValue {
				t.Errorf("expected header %q to be set to %q; got %q instead",
					headerKey, headerValue, r.Header.Get(headerKey))
			}
		}
	})
	srv := httptest.NewServer(mux)

@@ -90,10 +96,10 @@ func TestAlertManager_Send(t *testing.T) {
	if err != nil {
		t.Errorf("unexpected error: %s", err)
	}
	if err := am.Send(context.Background(), []Alert{{}, {}}); err == nil {
	if err := am.Send(context.Background(), []Alert{{}, {}}, nil); err == nil {
		t.Error("expected connection error got nil")
	}
	if err := am.Send(context.Background(), []Alert{}); err == nil {
	if err := am.Send(context.Background(), []Alert{}, nil); err == nil {
		t.Error("expected wrong http code error got nil")
	}
	if err := am.Send(context.Background(), []Alert{{

@@ -102,10 +108,13 @@ func TestAlertManager_Send(t *testing.T) {
		Start: time.Now().UTC(),
		End: time.Now().UTC(),
		Annotations: map[string]string{"a": "b", "c": "d", "e": "f"},
	}}); err != nil {
	}}, nil); err != nil {
		t.Errorf("unexpected error %s", err)
	}
	if c != 2 {
		t.Errorf("expected 2 calls(count from zero) to server got %d", c)
	}
	if err := am.Send(context.Background(), nil, map[string]string{headerKey: headerValue}); err != nil {
		t.Errorf("unexpected error %s", err)
	}
}
@@ -7,7 +7,7 @@ type Notifier interface {
	// Send sends the given list of alerts.
	// Returns an error if it fails to send the alerts.
	// Must unblock if the given ctx is cancelled.
	Send(ctx context.Context, alerts []Alert) error
	Send(ctx context.Context, alerts []Alert, notifierHeaders map[string]string) error
	// Addr returns address where alerts are sent.
	Addr() string
	// Close is a destructor for the Notifier
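To make the contract concrete, below is a minimal sketch of how a custom implementation could satisfy the updated `Notifier` interface. The `WebhookNotifier` type and its fields are hypothetical illustrations, not part of this commit; only the interface shape comes from the diff above.

```go
package notifier

import (
	"bytes"
	"context"
	"encoding/json"
	"net/http"
)

// WebhookNotifier is a hypothetical Notifier that POSTs alerts as JSON.
type WebhookNotifier struct {
	addr   string
	client *http.Client
}

func (wn *WebhookNotifier) Send(ctx context.Context, alerts []Alert, notifierHeaders map[string]string) error {
	body, err := json.Marshal(alerts)
	if err != nil {
		return err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, wn.addr, bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	// Apply the per-group headers introduced by this change.
	for k, v := range notifierHeaders {
		req.Header.Set(k, v)
	}
	// Binding the request to ctx satisfies the "must unblock on cancel" rule.
	resp, err := wn.client.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

func (wn *WebhookNotifier) Addr() string { return wn.addr }

func (wn *WebhookNotifier) Close() {}
```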
@@ -99,13 +99,13 @@ func (rr *RecordingRule) Close() {
// It doesn't update internal states of the Rule and is meant to be used just
// to get time series for backfilling.
func (rr *RecordingRule) ExecRange(ctx context.Context, start, end time.Time) ([]prompbmarshal.TimeSeries, error) {
	series, err := rr.q.QueryRange(ctx, rr.Expr, start, end)
	res, err := rr.q.QueryRange(ctx, rr.Expr, start, end)
	if err != nil {
		return nil, err
	}
	duplicates := make(map[string]struct{}, len(series))
	duplicates := make(map[string]struct{}, len(res.Data))
	var tss []prompbmarshal.TimeSeries
	for _, s := range series {
	for _, s := range res.Data {
		ts := rr.toTimeSeries(s)
		key := stringifyLabels(ts)
		if _, ok := duplicates[key]; ok {

@@ -120,13 +120,14 @@ func (rr *RecordingRule) ExecRange(ctx context.Context, start, end time.Time) ([
// Exec executes RecordingRule expression via the given Querier.
func (rr *RecordingRule) Exec(ctx context.Context, ts time.Time, limit int) ([]prompbmarshal.TimeSeries, error) {
	start := time.Now()
	qMetrics, req, err := rr.q.Query(ctx, rr.Expr, ts)
	res, req, err := rr.q.Query(ctx, rr.Expr, ts)
	curState := ruleStateEntry{
		time: start,
		at: ts,
		duration: time.Since(start),
		samples: len(qMetrics),
		curl: requestToCurl(req),
		time: start,
		at: ts,
		duration: time.Since(start),
		samples: len(res.Data),
		seriesFetched: res.SeriesFetched,
		curl: requestToCurl(req),
	}

	defer func() {

@@ -138,6 +139,7 @@ func (rr *RecordingRule) Exec(ctx context.Context, ts time.Time, limit int) ([]p
		return nil, curState.err
	}

	qMetrics := res.Data
	numSeries := len(qMetrics)
	if limit > 0 && numSeries > limit {
		curState.err = fmt.Errorf("exec exceeded limit of %d with %d series", limit, numSeries)

@@ -208,17 +210,18 @@ func (rr *RecordingRule) UpdateWith(r Rule) error {
func (rr *RecordingRule) ToAPI() APIRule {
	lastState := rr.state.getLast()
	r := APIRule{
		Type: "recording",
		DatasourceType: rr.Type.String(),
		Name: rr.Name,
		Query: rr.Expr,
		Labels: rr.Labels,
		LastEvaluation: lastState.time,
		EvaluationTime: lastState.duration.Seconds(),
		Health: "ok",
		LastSamples: lastState.samples,
		MaxUpdates: rr.state.size(),
		Updates: rr.state.getAll(),
		Type: "recording",
		DatasourceType: rr.Type.String(),
		Name: rr.Name,
		Query: rr.Expr,
		Labels: rr.Labels,
		LastEvaluation: lastState.time,
		EvaluationTime: lastState.duration.Seconds(),
		Health: "ok",
		LastSamples: lastState.samples,
		LastSeriesFetched: lastState.seriesFetched,
		MaxUpdates: rr.state.size(),
		Updates: rr.state.getAll(),

		// encode as strings to avoid rounding
		ID: fmt.Sprintf("%d", rr.ID()),
@@ -20,21 +20,21 @@ func (fr *fakeReplayQuerier) BuildWithParams(_ datasource.QuerierParams) datasou
	return fr
}

func (fr *fakeReplayQuerier) QueryRange(_ context.Context, q string, from, to time.Time) ([]datasource.Metric, error) {
func (fr *fakeReplayQuerier) QueryRange(_ context.Context, q string, from, to time.Time) (res datasource.Result, err error) {
	key := fmt.Sprintf("%s+%s", from.Format("15:04:05"), to.Format("15:04:05"))
	dps, ok := fr.registry[q]
	if !ok {
		return nil, fmt.Errorf("unexpected query received: %q", q)
		return res, fmt.Errorf("unexpected query received: %q", q)
	}
	_, ok = dps[key]
	if !ok {
		return nil, fmt.Errorf("unexpected time range received: %q", key)
		return res, fmt.Errorf("unexpected time range received: %q", key)
	}
	delete(dps, key)
	if len(fr.registry[q]) < 1 {
		delete(fr.registry, q)
	}
	return nil, nil
	return res, nil
}

func TestReplay(t *testing.T) {
@@ -53,6 +53,12 @@ type ruleStateEntry struct {
	// stores the number of samples returned during
	// the last evaluation
	samples int
	// stores the number of time series fetched during
	// the last evaluation.
	// Is supported by VictoriaMetrics only, starting from v1.90.0
	// If seriesFetched == nil, then this attribute was missing in
	// datasource response (unsupported).
	seriesFetched *int
	// stores the curl command reflecting the HTTP request used during rule.Exec
	curl string
}
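The comment above describes a tri-state value: unsupported (nil), suspicious (zero series and zero samples), or a plain count. A minimal sketch of how a caller might interpret it follows; `describeSeriesFetched` is an illustrative helper, not a function from this commit.

```go
// describeSeriesFetched interprets the tri-state seriesFetched attribute.
func describeSeriesFetched(e ruleStateEntry) string {
	switch {
	case e.seriesFetched == nil:
		// The datasource didn't report the attribute
		// (e.g. VictoriaMetrics older than v1.90.0).
		return "series fetched: unsupported by datasource"
	case *e.seriesFetched == 0 && e.samples == 0:
		// Nothing was selected at all - likely a typo in the expression
		// or the data is missing in the datasource.
		return "warning: no series were fetched during the last evaluation"
	default:
		return fmt.Sprintf("series fetched: %d", *e.seriesFetched)
	}
}
```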
@@ -21,7 +21,6 @@ import (
	"math"
	"net"
	"net/url"
	"path/filepath"
	"regexp"
	"sort"
	"strconv"

@@ -30,6 +29,8 @@ import (
	textTpl "text/template"
	"time"

	"github.com/bmatcuk/doublestar/v4"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/formatutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"

@@ -59,12 +60,12 @@ func Load(pathPatterns []string, overwrite bool) error {
	var err error
	tmpl := newTemplate()
	for _, tp := range pathPatterns {
		p, err := filepath.Glob(tp)
		p, err := doublestar.FilepathGlob(tp)
		if err != nil {
			return fmt.Errorf("failed to retrieve a template glob %q: %w", tp, err)
		}
		if len(p) > 0 {
			tmpl, err = tmpl.ParseGlob(tp)
			tmpl, err = tmpl.ParseFiles(p...)
			if err != nil {
				return fmt.Errorf("failed to parse template glob %q: %w", tp, err)
			}

@@ -182,6 +183,10 @@ func Get() (*textTpl.Template, error) {
func FuncsWithQuery(query QueryFn) textTpl.FuncMap {
	return textTpl.FuncMap{
		"query": func(q string) ([]metric, error) {
			if query == nil {
				return nil, fmt.Errorf("cannot execute query %q: query is not available in this context", q)
			}

			result, err := query(q)
			if err != nil {
				return nil, err
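The switch from `filepath.Glob` to `doublestar.FilepathGlob` is what enables the recursive `dir/**/*.tpl` patterns mentioned in the flag description: the standard library glob has no `**` support. A small standalone sketch (paths are illustrative):

```go
package main

import (
	"fmt"

	"github.com/bmatcuk/doublestar/v4"
)

func main() {
	// Matches dir/a.tpl, dir/sub/b.tpl, dir/sub/deeper/c.tpl, and so on.
	// stdlib filepath.Glob("dir/**/*.tpl") would only look one level deep.
	matches, err := doublestar.FilepathGlob("dir/**/*.tpl")
	if err != nil {
		panic(err)
	}
	for _, m := range matches {
		fmt.Println(m)
	}
}
```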
@@ -38,6 +38,10 @@
		group.click();
	}
});

$(document).ready(function() {
	$('[data-bs-toggle="tooltip"]').tooltip();
});
</script>
</body>
</html>
@@ -73,35 +73,39 @@ func StreamFooter(qw422016 *qt422016.Writer, r *http.Request) {
		group.click();
	}
});

$(document).ready(function() {
	$('[data-bs-toggle="tooltip"]').tooltip();
});
</script>
</body>
</html>
`)
//line app/vmalert/tpl/footer.qtpl:44
//line app/vmalert/tpl/footer.qtpl:48
}

//line app/vmalert/tpl/footer.qtpl:44
//line app/vmalert/tpl/footer.qtpl:48
func WriteFooter(qq422016 qtio422016.Writer, r *http.Request) {
//line app/vmalert/tpl/footer.qtpl:44
//line app/vmalert/tpl/footer.qtpl:48
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmalert/tpl/footer.qtpl:44
//line app/vmalert/tpl/footer.qtpl:48
	StreamFooter(qw422016, r)
//line app/vmalert/tpl/footer.qtpl:44
//line app/vmalert/tpl/footer.qtpl:48
	qt422016.ReleaseWriter(qw422016)
//line app/vmalert/tpl/footer.qtpl:44
//line app/vmalert/tpl/footer.qtpl:48
}

//line app/vmalert/tpl/footer.qtpl:44
//line app/vmalert/tpl/footer.qtpl:48
func Footer(r *http.Request) string {
//line app/vmalert/tpl/footer.qtpl:44
//line app/vmalert/tpl/footer.qtpl:48
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vmalert/tpl/footer.qtpl:44
//line app/vmalert/tpl/footer.qtpl:48
	WriteFooter(qb422016, r)
//line app/vmalert/tpl/footer.qtpl:44
//line app/vmalert/tpl/footer.qtpl:48
	qs422016 := string(qb422016.B)
//line app/vmalert/tpl/footer.qtpl:44
//line app/vmalert/tpl/footer.qtpl:48
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vmalert/tpl/footer.qtpl:44
//line app/vmalert/tpl/footer.qtpl:48
	return qs422016
//line app/vmalert/tpl/footer.qtpl:44
//line app/vmalert/tpl/footer.qtpl:48
}
@@ -211,7 +211,7 @@ func (rh *requestHandler) groups() []APIGroup {
	rh.m.groupsMu.RLock()
	defer rh.m.groupsMu.RUnlock()

	var groups []APIGroup
	groups := make([]APIGroup, 0)
	for _, g := range rh.m.groups {
		groups = append(groups, g.toAPI())
	}

@@ -276,6 +276,7 @@ func (rh *requestHandler) listAlerts() ([]byte, error) {
	defer rh.m.groupsMu.RUnlock()

	lr := listAlertsResponse{Status: "success"}
	lr.Data.Alerts = make([]*APIAlert, 0)
	for _, g := range rh.m.groups {
		for _, r := range g.Rules {
			a, ok := r.(*AlertingRule)
@@ -91,7 +91,9 @@
{% else %}
<b>record:</b> {%s r.Name %}
{% endif %}
| <span><a target="_blank" href="{%s prefix+r.WebLink() %}">Details</a></span>

{%= seriesFetchedWarn(r) %}
<span><a target="_blank" href="{%s prefix+r.WebLink() %}">Details</a></span>
</div>
<div class="col-12">
<code><pre>{%s r.Query %}</pre></code>

@@ -377,8 +379,20 @@
	annotationKeys = append(annotationKeys, k)
}
sort.Strings(annotationKeys)

var seriesFetchedEnabled bool
var seriesFetchedWarning bool
for _, u := range rule.Updates {
	if u.seriesFetched != nil {
		seriesFetchedEnabled = true
		if *u.seriesFetched == 0 && u.samples == 0 {
			seriesFetchedWarning = true
		}
	}
}

%}
<div class="display-6 pb-3 mb-3">Rule: {%s rule.Name %}<span class="ms-2 badge {% if rule.Health!="ok" %}bg-danger{% else %} bg-warning text-dark{% endif %}">{%s rule.Health %}</span></div>
<div class="display-6 pb-3 mb-3">Rule: {%s rule.Name %}<span class="ms-2 badge {% if rule.Health!="ok" %}bg-danger{% else %} bg-success text-dark{% endif %}">{%s rule.Health %}</span></div>
<div class="container border-bottom p-2">
<div class="row">
<div class="col-2">

@@ -450,12 +464,26 @@
</div>

<br>
{% if seriesFetchedWarning %}
<div class="alert alert-warning" role="alert">
<strong>Warning:</strong> some of the updates have "Series fetched" equal to 0.<br>
It might be that either this data is missing in the datasource or there is a typo in rule's expression.
For example, <strong>foo{label="bar"} > 0</strong> could never trigger because <strong>foo{label="bar"}</strong>
metric doesn't exist.
<br>
Rule's expressions without time series selector, like <strong>expr: 42</strong> or <strong>expr: time()</strong>
aren't fetching time series from datasource, so they could have "Series fetched" equal to 0 and this won't be a problem.
<br>
See more details about this detection <a target="_blank" href="https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4039">here</a>.
</div>
{% endif %}
<div class="display-6 pb-3">Last {%d len(rule.Updates) %}/{%d rule.MaxUpdates %} updates</span>:</div>
<table class="table table-striped table-hover table-sm">
<thead>
<tr>
<th scope="col" title="The time when event was created">Updated at</th>
<th scope="col" style="width: 10%" class="text-center" title="How many samples were returned">Samples</th>
{% if seriesFetchedEnabled %}<th scope="col" style="width: 10%" class="text-center" title="How many series were scanned by datasource during the evaluation">Series fetched</th>{% endif %}
<th scope="col" style="width: 10%" class="text-center" title="How many seconds request took">Duration</th>
<th scope="col" class="text-center" title="Time used for rule execution">Executed at</th>
<th scope="col" class="text-center" title="cURL command with request example">cURL</th>

@@ -468,7 +496,8 @@
<td>
<span class="badge bg-primary rounded-pill me-3" title="Updated at">{%s u.time.Format(time.RFC3339) %}</span>
</td>
<td class="text-center" wi>{%d u.samples %}</td>
<td class="text-center">{%d u.samples %}</td>
{% if seriesFetchedEnabled %}<td class="text-center">{% if u.seriesFetched != nil %}{%d *u.seriesFetched %}{% endif %}</td>{% endif %}
<td class="text-center">{%f.3 u.duration.Seconds() %}s</td>
<td class="text-center">{%s u.at.Format(time.RFC3339) %}</td>
<td>

@@ -478,7 +507,7 @@
</li>
{% if u.err != nil %}
<tr{% if u.err != nil %} class="alert-danger"{% endif %}>
<td colspan="5">
<td colspan="{% if seriesFetchedEnabled %}6{%else%}5{%endif%}">
<span class="alert-danger">{%v u.err %}</span>
</td>
</tr>

@@ -503,3 +532,16 @@
{% func badgeRestored() %}
<span class="badge bg-warning text-dark" title="Alert state was restored after the service restart from remote storage">restored</span>
{% endfunc %}

{% func seriesFetchedWarn(r APIRule) %}
{% if r.LastSamples == 0 && r.LastSeriesFetched != nil && *r.LastSeriesFetched == 0 %}
<svg xmlns="http://www.w3.org/2000/svg"
	data-bs-toggle="tooltip"
	title="This rule last evaluation hasn't selected any time series from the datasource.
	It might be that either this data is missing in the datasource or there is a typo in rule's expression.
	See more in Details."
	width="18" height="18" fill="currentColor" class="bi bi-exclamation-triangle-fill flex-shrink-0 me-2" viewBox="0 0 16 16" role="img" aria-label="Warning:">
	<path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm.93-9.412-1 4.705c-.07.34.029.533.304.533.194 0 .487-.07.686-.246l-.088.416c-.287.346-.92.598-1.465.598-.703 0-1.002-.422-.808-1.319l.738-3.468c.064-.293.006-.399-.287-.47l-.451-.081.082-.381 2.29-.287zM8 5.5a1 1 0 1 1 0-2 1 1 0 0 1 0 2z"/>
</svg>
{% endif %}
{% endfunc %}

(File diff suppressed because it is too large)
@@ -162,5 +162,59 @@ func TestHandler(t *testing.T) {
	t.Run("/api/v1/1/0/status", func(t *testing.T) {
		getResp(ts.URL+"/api/v1/1/0/status", nil, 404)
	})

}

func TestEmptyResponse(t *testing.T) {
	rh := &requestHandler{m: &manager{groups: make(map[uint64]*Group)}}
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { rh.handler(w, r) }))
	defer ts.Close()

	getResp := func(url string, to interface{}, code int) {
		t.Helper()
		resp, err := http.Get(url)
		if err != nil {
			t.Fatalf("unexpected err %s", err)
		}
		if code != resp.StatusCode {
			t.Errorf("unexpected status code %d want %d", resp.StatusCode, code)
		}
		defer func() {
			if err := resp.Body.Close(); err != nil {
				t.Errorf("err closing body %s", err)
			}
		}()
		if to != nil {
			if err = json.NewDecoder(resp.Body).Decode(to); err != nil {
				t.Errorf("unexpected err %s", err)
			}
		}
	}

	t.Run("/api/v1/alerts", func(t *testing.T) {
		lr := listAlertsResponse{}
		getResp(ts.URL+"/api/v1/alerts", &lr, 200)
		if lr.Data.Alerts == nil {
			t.Errorf("expected /api/v1/alerts response to have non-nil data")
		}

		lr = listAlertsResponse{}
		getResp(ts.URL+"/vmalert/api/v1/alerts", &lr, 200)
		if lr.Data.Alerts == nil {
			t.Errorf("expected /api/v1/alerts response to have non-nil data")
		}
	})

	t.Run("/api/v1/rules", func(t *testing.T) {
		lr := listGroupsResponse{}
		getResp(ts.URL+"/api/v1/rules", &lr, 200)
		if lr.Data.Groups == nil {
			t.Errorf("expected /api/v1/rules response to have non-nil data")
		}

		lr = listGroupsResponse{}
		getResp(ts.URL+"/vmalert/api/v1/rules", &lr, 200)
		if lr.Data.Groups == nil {
			t.Errorf("expected /api/v1/rules response to have non-nil data")
		}
	})
}
@@ -72,6 +72,8 @@ type APIGroup struct {
	Params []string `json:"params,omitempty"`
	// Headers contains HTTP headers added to each Rule's request
	Headers []string `json:"headers,omitempty"`
	// NotifierHeaders contains HTTP headers added to each alert request which will be sent to the notifier
	NotifierHeaders []string `json:"notifier_headers,omitempty"`
	// Labels is a set of label value pairs, that will be added to every rule.
	Labels map[string]string `json:"labels,omitempty"`
}

@@ -115,7 +117,12 @@ type APIRule struct {

	// DatasourceType of the rule: prometheus or graphite
	DatasourceType string `json:"datasourceType"`
	LastSamples int `json:"lastSamples"`
	// LastSamples stores the amount of data samples received on last evaluation
	LastSamples int `json:"lastSamples"`
	// LastSeriesFetched stores the amount of time series fetched by datasource
	// during the last evaluation
	LastSeriesFetched *int `json:"lastSeriesFetched,omitempty"`

	// ID is a unique Alert's ID within a group
	ID string `json:"id"`
	// GroupID is a unique Group's ID
@@ -62,8 +62,40 @@ The following [metrics](#monitoring) related to concurrency limits are exposed b
- `vmauth_user_concurrent_requests_current{username="..."}` - the current number of concurrent requests for the given `username`.
- `vmauth_user_concurrent_requests_limit_reached_total{username="foo"}` - the number of requests rejected with `429 Too Many Requests` error
  because the concurrency limit has been reached for the given `username`.
- `vmauth_unauthorized_user_concurrent_requests_capacity` - the limit on the number of concurrent requests for unauthorized users (if `unauthorized_user` section is used).
- `vmauth_unauthorized_user_concurrent_requests_current` - the current number of concurrent requests for unauthorized users (if `unauthorized_user` section is used).
- `vmauth_unauthorized_user_concurrent_requests_limit_reached_total` - the number of requests rejected with `429 Too Many Requests` error
  because the concurrency limit has been reached for unauthorized users (if `unauthorized_user` section is used).


## IP filters

[Enterprise version](https://docs.victoriametrics.com/enterprise.html) of `vmauth` can be configured to allow / deny incoming requests via global and per-user IP filters.

For example, the following config allows requests to `vmauth` from `10.0.0.0/24` network and from `1.2.3.4` IP address, while denying requests from `10.0.0.42` IP address:

```yml
users:
# User configs here

ip_filters:
  allow_list:
  - 10.0.0.0/24
  - 1.2.3.4
  deny_list: [10.0.0.42]
```

The following config allows requests for the user 'foobar' only from the ip `127.0.0.1`:

```yml
users:
- username: "foobar"
  password: "***"
  url_prefix: "http://localhost:8428"
  ip_filters:
    allow_list: [127.0.0.1]
```

## Auth config

`-auth.config` is represented in the following simple `yml` format:

@@ -131,14 +163,22 @@ users:
  - "http://vminsert2:8480/insert/42/prometheus"

# A single user for querying and inserting data:
#
# - Requests to http://vmauth:8427/api/v1/query, http://vmauth:8427/api/v1/query_range
#   and http://vmauth:8427/api/v1/label/<label_name>/values are proxied to the following urls in a round-robin manner:
#   - http://vmselect1:8481/select/42/prometheus
#   - http://vmselect2:8481/select/42/prometheus
#   For example, http://vmauth:8427/api/v1/query is proxied to http://vmselect1:8480/select/42/prometheus/api/v1/query
#   or to http://vmselect2:8480/select/42/prometheus/api/v1/query .
#
# - Requests to http://vmauth:8427/api/v1/write are proxied to http://vminsert:8480/insert/42/prometheus/api/v1/write .
#   The "X-Scope-OrgID: abc" http header is added to these requests.
#
# Requests which do not match `src_paths` from the `url_map` are proxied to the urls from `default_url`
# in a round-robin manner. The original request path is passed in `request_path` query arg.
# For example, requests to http://vmauth:8427/non/existing/path are proxied:
#   - to http://default1:8888/unsupported_url_handler?request_path=/non/existing/path
#   - or to http://default2:8888/unsupported_url_handler?request_path=/non/existing/path
- username: "foobar"
  url_map:
  - src_paths:

@@ -152,6 +192,28 @@ users:
    url_prefix: "http://vminsert:8480/insert/42/prometheus"
    headers:
    - "X-Scope-OrgID: abc"
  ip_filters:
    deny_list: [127.0.0.1]
  default_url:
  - "http://default1:8888/unsupported_url_handler"
  - "http://default2:8888/unsupported_url_handler"

# Requests without Authorization header are routed according to `unauthorized_user` section.
unauthorized_user:
  url_map:
  - src_paths:
    - /api/v1/query
    - /api/v1/query_range
    url_prefix:
    - http://vmselect1:8481/select/0/prometheus
    - http://vmselect2:8481/select/0/prometheus
  ip_filters:
    allow_list: [8.8.8.8]

ip_filters:
  allow_list: ["1.2.3.0/24", "127.0.0.1"]
  deny_list:
  - 10.1.0.1
```

The config may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.

@@ -174,12 +236,14 @@ Do not transfer Basic Auth headers in plaintext over untrusted networks. Enable

Alternatively, [https termination proxy](https://en.wikipedia.org/wiki/TLS_termination_proxy) may be put in front of `vmauth`.

It is recommended protecting following endpoints with authKeys:
It is recommended protecting the following endpoints with authKeys:
* `/-/reload` with `-reloadAuthKey` command-line flag, so external users couldn't trigger config reload.
* `/flags` with `-flagsAuthkey` command-line flag, so unauthorized users couldn't get application command-line flags.
* `/metrics` with `metricsAuthkey` command-line flag, so unauthorized users couldn't get access to [vmauth metrics](#monitoring).
* `/debug/pprof` with `pprofAuthKey` command-line flag, so unauthorized users couldn't get access to [profiling information](#profiling).

`vmauth` also supports the ability to restrict access by IP - see [these docs](#ip-filters). See also [concurrency limiting docs](#concurrency-limiting).

## Monitoring

`vmauth` exports various metrics in Prometheus exposition format at `http://vmauth-host:8427/metrics` page. It is recommended setting up regular scraping of this page

@@ -194,6 +258,8 @@ users:
# other config options here
```

For unauthorized users `vmauth` exports `vmauth_unauthorized_user_requests_total` metric without label (if `unauthorized_user` section of config is used).

## How to build from sources

It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmauth` is located in `vmutils-*` archives there.
@@ -31,7 +31,8 @@ var (

// AuthConfig represents auth config.
type AuthConfig struct {
	Users []UserInfo `yaml:"users,omitempty"`
	Users []UserInfo `yaml:"users,omitempty"`
	UnauthorizedUser *UserInfo `yaml:"unauthorized_user,omitempty"`
}

// UserInfo is user information read from authConfigPath

@@ -44,6 +45,7 @@ type UserInfo struct {
	URLMaps []URLMap `yaml:"url_map,omitempty"`
	Headers []Header `yaml:"headers,omitempty"`
	MaxConcurrentRequests int `yaml:"max_concurrent_requests,omitempty"`
	DefaultURL *URLPrefix `yaml:"default_url,omitempty"`

	concurrencyLimitCh chan struct{}
	concurrencyLimitReached *metrics.Counter

@@ -289,11 +291,11 @@ func initAuthConfig() {
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1240
	sighupCh := procutil.NewSighupChan()

	m, err := readAuthConfig(*authConfigPath)
	err := loadAuthConfig()
	if err != nil {
		logger.Fatalf("cannot load auth config from `-auth.config=%s`: %s", *authConfigPath, err)
		logger.Fatalf("cannot load auth config: %s", err)
	}
	authConfig.Store(m)

	stopCh = make(chan struct{})
	authConfigWG.Add(1)
	go func() {

@@ -324,44 +326,71 @@ func authConfigReloader(sighupCh <-chan os.Signal) {
			procutil.SelfSIGHUP()
		case <-sighupCh:
			logger.Infof("SIGHUP received; loading -auth.config=%q", *authConfigPath)
			m, err := readAuthConfig(*authConfigPath)
			err := loadAuthConfig()
			if err != nil {
				logger.Errorf("failed to load -auth.config=%q; using the last successfully loaded config; error: %s", *authConfigPath, err)
				logger.Errorf("failed to load auth config; using the last successfully loaded config; error: %s", err)
				continue
			}
			authConfig.Store(m)
			logger.Infof("Successfully reloaded -auth.config=%q", *authConfigPath)
		}
	}
}

var authConfig atomic.Value
var authConfig atomic.Pointer[AuthConfig]
var authUsers atomic.Pointer[map[string]*UserInfo]
var authConfigWG sync.WaitGroup
var stopCh chan struct{}

func readAuthConfig(path string) (map[string]*UserInfo, error) {
func loadAuthConfig() error {
	ac, err := readAuthConfig(*authConfigPath)
	if err != nil {
		return fmt.Errorf("failed to load -auth.config=%q: %s", *authConfigPath, err)
	}

	m, err := parseAuthConfigUsers(ac)
	if err != nil {
		return fmt.Errorf("failed to parse users from -auth.config=%q: %s", *authConfigPath, err)
	}
	logger.Infof("loaded information about %d users from -auth.config=%q", len(m), *authConfigPath)

	authConfig.Store(ac)
	authUsers.Store(&m)

	return nil
}

func readAuthConfig(path string) (*AuthConfig, error) {
	data, err := fs.ReadFileOrHTTP(path)
	if err != nil {
		return nil, err
	}
	m, err := parseAuthConfig(data)
	if err != nil {
		return nil, fmt.Errorf("cannot parse %q: %w", path, err)
	}
	logger.Infof("Loaded information about %d users from %q", len(m), path)
	return m, nil
	return parseAuthConfig(data)
}

func parseAuthConfig(data []byte) (map[string]*UserInfo, error) {
	var err error
	data, err = envtemplate.ReplaceBytes(data)
func parseAuthConfig(data []byte) (*AuthConfig, error) {
	data, err := envtemplate.ReplaceBytes(data)
	if err != nil {
		return nil, fmt.Errorf("cannot expand environment vars: %w", err)
	}
	var ac AuthConfig
	if err := yaml.UnmarshalStrict(data, &ac); err != nil {
	if err = yaml.UnmarshalStrict(data, &ac); err != nil {
		return nil, fmt.Errorf("cannot unmarshal AuthConfig data: %w", err)
	}
	ui := ac.UnauthorizedUser
	if ui != nil {
		ui.requests = metrics.GetOrCreateCounter(`vmauth_unauthorized_user_requests_total`)
		ui.concurrencyLimitCh = make(chan struct{}, ui.getMaxConcurrentRequests())
		ui.concurrencyLimitReached = metrics.GetOrCreateCounter(`vmauth_unauthorized_user_concurrent_requests_limit_reached_total`)
		_ = metrics.GetOrCreateGauge(`vmauth_unauthorized_user_concurrent_requests_capacity`, func() float64 {
			return float64(cap(ui.concurrencyLimitCh))
		})
		_ = metrics.GetOrCreateGauge(`vmauth_unauthorized_user_concurrent_requests_current`, func() float64 {
			return float64(len(ui.concurrencyLimitCh))
		})
	}
	return &ac, nil
}

func parseAuthConfigUsers(ac *AuthConfig) (map[string]*UserInfo, error) {
	uis := ac.Users
	if len(uis) == 0 {
		return nil, fmt.Errorf("`users` section cannot be empty in AuthConfig")

@@ -387,6 +416,11 @@ func parseAuthConfig(data []byte) (map[string]*UserInfo, error) {
			return nil, err
		}
	}
	if ui.DefaultURL != nil {
		if err := ui.DefaultURL.sanitize(); err != nil {
			return nil, err
		}
	}
	for _, e := range ui.URLMaps {
		if len(e.SrcPaths) == 0 {
			return nil, fmt.Errorf("missing `src_paths` in `url_map`")
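The switch from `atomic.Value` to typed `atomic.Pointer` fields above follows a common Go hot-reload pattern: request handlers `Load()` a pointer lock-free and get a consistent snapshot, while the SIGHUP reloader atomically `Store()`s a freshly parsed config. A standalone sketch of the pattern under simplified, illustrative names (not code from this commit):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type Config struct{ Users []string }

// current holds the active config; atomic.Pointer needs Go 1.19+.
var current atomic.Pointer[Config]

// reload atomically swaps in a new config; in-flight readers keep
// using the snapshot they already loaded.
func reload(c *Config) { current.Store(c) }

func handle() {
	cfg := current.Load() // one consistent snapshot for the whole request
	fmt.Println(len(cfg.Users), "users configured")
}

func main() {
	reload(&Config{Users: []string{"foobar"}})
	handle()
}
```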
@@ -13,7 +13,11 @@ import (
func TestParseAuthConfigFailure(t *testing.T) {
	f := func(s string) {
		t.Helper()
		_, err := parseAuthConfig([]byte(s))
		ac, err := parseAuthConfig([]byte(s))
		if err != nil {
			return
		}
		_, err = parseAuthConfigUsers(ac)
		if err == nil {
			t.Fatalf("expecting non-nil error")
		}

@@ -202,7 +206,11 @@ users:
func TestParseAuthConfigSuccess(t *testing.T) {
	f := func(s string, expectedAuthConfig map[string]*UserInfo) {
		t.Helper()
		m, err := parseAuthConfig([]byte(s))
		ac, err := parseAuthConfig([]byte(s))
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		m, err := parseAuthConfigUsers(ac)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}

@@ -352,6 +360,83 @@ users:
		URLPrefix: mustParseURL("https://bar/x"),
	},
})
// with default url
f(`
users:
- bearer_token: foo
  url_map:
  - src_paths: ["/api/v1/query","/api/v1/query_range","/api/v1/label/[^./]+/.+"]
    url_prefix: http://vmselect/select/0/prometheus
  - src_paths: ["/api/v1/write"]
    url_prefix: ["http://vminsert1/insert/0/prometheus","http://vminsert2/insert/0/prometheus"]
    headers:
    - "foo: bar"
    - "xxx: y"
  default_url:
  - http://default1/select/0/prometheus
  - http://default2/select/0/prometheus
`, map[string]*UserInfo{
	getAuthToken("foo", "", ""): {
		BearerToken: "foo",
		URLMaps: []URLMap{
			{
				SrcPaths: getSrcPaths([]string{"/api/v1/query", "/api/v1/query_range", "/api/v1/label/[^./]+/.+"}),
				URLPrefix: mustParseURL("http://vmselect/select/0/prometheus"),
			},
			{
				SrcPaths: getSrcPaths([]string{"/api/v1/write"}),
				URLPrefix: mustParseURLs([]string{
					"http://vminsert1/insert/0/prometheus",
					"http://vminsert2/insert/0/prometheus",
				}),
				Headers: []Header{
					{
						Name: "foo",
						Value: "bar",
					},
					{
						Name: "xxx",
						Value: "y",
					},
				},
			},
		},
		DefaultURL: mustParseURLs([]string{
			"http://default1/select/0/prometheus",
			"http://default2/select/0/prometheus",
		}),
	},
	getAuthToken("", "foo", ""): {
		BearerToken: "foo",
		URLMaps: []URLMap{
			{
				SrcPaths: getSrcPaths([]string{"/api/v1/query", "/api/v1/query_range", "/api/v1/label/[^./]+/.+"}),
				URLPrefix: mustParseURL("http://vmselect/select/0/prometheus"),
			},
			{
				SrcPaths: getSrcPaths([]string{"/api/v1/write"}),
				URLPrefix: mustParseURLs([]string{
					"http://vminsert1/insert/0/prometheus",
					"http://vminsert2/insert/0/prometheus",
				}),
				Headers: []Header{
					{
						Name: "foo",
						Value: "bar",
					},
					{
						Name: "xxx",
						Value: "y",
					},
				},
			},
		},
		DefaultURL: mustParseURLs([]string{
			"http://default1/select/0/prometheus",
			"http://default2/select/0/prometheus",
		}),
	},
})

}
@@ -60,14 +60,22 @@ users:
  - "http://vminsert2:8480/insert/42/prometheus"

# A single user for querying and inserting data:
#
# - Requests to http://vmauth:8427/api/v1/query, http://vmauth:8427/api/v1/query_range
#   and http://vmauth:8427/api/v1/label/<label_name>/values are proxied to the following urls in a round-robin manner:
#   - http://vmselect1:8481/select/42/prometheus
#   - http://vmselect2:8481/select/42/prometheus
#   For example, http://vmauth:8427/api/v1/query is proxied to http://vmselect1:8480/select/42/prometheus/api/v1/query
#   or to http://vmselect2:8480/select/42/prometheus/api/v1/query .
#
# - Requests to http://vmauth:8427/api/v1/write are proxied to http://vminsert:8480/insert/42/prometheus/api/v1/write .
#   The "X-Scope-OrgID: abc" http header is added to these requests.
#
# Requests which do not match `src_paths` from the `url_map` are proxied to the urls from `default_url`
# in a round-robin manner. The original request path is passed in `request_path` query arg.
# For example, requests to http://vmauth:8427/non/existing/path are proxied:
#   - to http://default1:8888/unsupported_url_handler?request_path=/non/existing/path
#   - or to http://default2:8888/unsupported_url_handler?request_path=/non/existing/path
- username: "foobar"
  url_map:
  - src_paths:

@@ -81,3 +89,25 @@ users:
    url_prefix: "http://vminsert:8480/insert/42/prometheus"
    headers:
    - "X-Scope-OrgID: abc"
  ip_filters:
    deny_list: [127.0.0.1]
  default_url:
  - "http://default1:8888/unsupported_url_handler"
  - "http://default2:8888/unsupported_url_handler"

# Requests without Authorization header are routed according to `unauthorized_user` section.
unauthorized_user:
  url_map:
  - src_paths:
    - /api/v1/query
    - /api/v1/query_range
    url_prefix:
    - http://vmselect1:8481/select/0/prometheus
    - http://vmselect2:8481/select/0/prometheus
  ip_filters:
    allow_list: [8.8.8.8]

ip_filters:
  allow_list: ["1.2.3.0/24", "127.0.0.1"]
  deny_list:
  - 10.1.0.1
@@ -84,6 +84,13 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
	}
	authToken := r.Header.Get("Authorization")
	if authToken == "" {
		// Process requests for unauthorized users
		ui := authConfig.Load().UnauthorizedUser
		if ui != nil {
			processUserRequest(w, r, ui)
			return true
		}

		w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`)
		http.Error(w, "missing `Authorization` request header", http.StatusUnauthorized)
		return true

@@ -94,7 +101,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
		authToken = strings.Replace(authToken, "Token", "Bearer", 1)
	}

	ac := authConfig.Load().(map[string]*UserInfo)
	ac := *authUsers.Load()
	ui := ac[authToken]
	if ui == nil {
		invalidAuthTokenRequests.Inc()

@@ -110,6 +117,12 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
		}
		return true
	}

	processUserRequest(w, r, ui)
	return true
}

func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
	ui.requests.Inc()

	// Limit the concurrency of requests to backends

@@ -119,31 +132,45 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
		if err := ui.beginConcurrencyLimit(); err != nil {
			handleConcurrencyLimitError(w, r, err)
			<-concurrencyLimitCh
			return true
			return
		}
	default:
		concurrentRequestsLimitReached.Inc()
		err := fmt.Errorf("cannot serve more than -maxConcurrentRequests=%d concurrent requests", cap(concurrencyLimitCh))
		handleConcurrencyLimitError(w, r, err)
		return true
		return
	}
	processRequest(w, r, ui)
	ui.endConcurrencyLimit()
	<-concurrencyLimitCh
	return true
}

func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
	u := normalizeURL(r.URL)
	up, headers, err := ui.getURLPrefixAndHeaders(u)
	if err != nil {
		httpserver.Errorf(w, r, "cannot determine targetURL: %s", err)
		return
	up, headers := ui.getURLPrefixAndHeaders(u)
	isDefault := false
	if up == nil {
		missingRouteRequests.Inc()
		if ui.DefaultURL == nil {
			httpserver.Errorf(w, r, "missing route for %q", u.String())
			return
		}
		up, headers = ui.DefaultURL, ui.Headers
		isDefault = true
	}

	maxAttempts := up.getBackendsCount()
	for i := 0; i < maxAttempts; i++ {
		bu := up.getLeastLoadedBackendURL()
		targetURL := mergeURLs(bu.url, u)
		targetURL := bu.url
		// Don't change path and add request_path query param for default route.
		if isDefault {
			query := targetURL.Query()
			query.Set("request_path", u.Path)
			targetURL.RawQuery = query.Encode()
		} else { // Update path for regular routes.
			targetURL = mergeURLs(targetURL, u)
		}
		ok := tryProcessingRequest(w, r, targetURL, headers)
		bu.put()
		if ok {

@@ -151,7 +178,7 @@ func processRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
		}
		bu.setBroken()
	}
	err = &httpserver.ErrorWithStatusCode{
	err := &httpserver.ErrorWithStatusCode{
		Err: fmt.Errorf("all the backends for the user %q are unavailable", ui.name()),
		StatusCode: http.StatusServiceUnavailable,
	}
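For clarity, here is a self-contained sketch (not the vmauth implementation itself) of the routing rule implemented above: paths matched by `src_paths` are appended to the backend prefix, while unmatched paths go to the default URL with the original path carried in the `request_path` query arg.

```go
package main

import (
	"fmt"
	"net/url"
)

// buildTargetURL mirrors the two routing cases described above.
func buildTargetURL(backend *url.URL, reqPath string, isDefault bool) *url.URL {
	target := *backend // shallow copy so the shared backend URL isn't mutated
	if isDefault {
		// Default route: keep the backend path, pass the original
		// request path via the request_path query arg.
		q := target.Query()
		q.Set("request_path", reqPath)
		target.RawQuery = q.Encode()
		return &target
	}
	// Regular route: append the request path to the backend prefix.
	target.Path += reqPath
	return &target
}

func main() {
	def, _ := url.Parse("http://default1:8888/unsupported_url_handler")
	fmt.Println(buildTargetURL(def, "/non/existing/path", true))
	// Prints: http://default1:8888/unsupported_url_handler?request_path=%2Fnon%2Fexisting%2Fpath
}
```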
@@ -1,7 +1,6 @@
package main

import (
	"fmt"
	"net/url"
	"path"
	"strings"

@@ -30,19 +29,18 @@ func mergeURLs(uiURL, requestURI *url.URL) *url.URL {
	return &targetURL
}

func (ui *UserInfo) getURLPrefixAndHeaders(u *url.URL) (*URLPrefix, []Header, error) {
func (ui *UserInfo) getURLPrefixAndHeaders(u *url.URL) (*URLPrefix, []Header) {
	for _, e := range ui.URLMaps {
		for _, sp := range e.SrcPaths {
			if sp.match(u.Path) {
				return e.URLPrefix, e.Headers, nil
				return e.URLPrefix, e.Headers
			}
		}
	}
	if ui.URLPrefix != nil {
		return ui.URLPrefix, ui.Headers, nil
		return ui.URLPrefix, ui.Headers
	}
	missingRouteRequests.Inc()
	return nil, nil, fmt.Errorf("missing route for %q", u.String())
	return nil, nil
}

func normalizeURL(uOrig *url.URL) *url.URL {
@@ -14,9 +14,9 @@ func TestCreateTargetURLSuccess(t *testing.T) {
		t.Fatalf("cannot parse %q: %s", requestURI, err)
	}
	u = normalizeURL(u)
	up, headers, err := ui.getURLPrefixAndHeaders(u)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	up, headers := ui.getURLPrefixAndHeaders(u)
	if up == nil {
		t.Fatalf("cannot determine backend: %s", err)
	}
	bu := up.getLeastLoadedBackendURL()
	target := mergeURLs(bu.url, u)

@@ -124,10 +124,7 @@ func TestCreateTargetURLFailure(t *testing.T) {
		t.Fatalf("cannot parse %q: %s", requestURI, err)
	}
	u = normalizeURL(u)
	up, headers, err := ui.getURLPrefixAndHeaders(u)
	if err == nil {
		t.Fatalf("expecting non-nil error")
	}
	up, headers := ui.getURLPrefixAndHeaders(u)
	if up != nil {
		t.Fatalf("unexpected non-empty up=%#v", up)
	}
@@ -155,7 +155,7 @@ See the docs at https://docs.victoriametrics.com/vmbackup.html .
}

func newSrcFS() (*fslocal.FS, error) {
	snapshotPath := *storageDataPath + "/snapshots/" + *snapshotName
	snapshotPath := filepath.Join(*storageDataPath, "snapshots", *snapshotName)

	// Verify the snapshot exists.
	f, err := os.Open(snapshotPath)
@ -158,8 +158,9 @@ The result on the GCS bucket. We see only 3 daily backups:
|
|||
* GET `/api/v1/backups` - returns list of backups in remote storage.
|
||||
Example output:
|
||||
```json
|
||||
[{"name":"daily/2022-11-30","size_bytes":26664689,"size":"25.429Mi"},{"name":"daily/2022-12-01","size_bytes":40160965,"size":"38.300Mi"},{"name":"hourly/2022-11-30:12","size_bytes":5846529,"size":"5.576Mi"},{"name":"hourly/2022-11-30:13","size_bytes":17651847,"size":"16.834Mi"},{"name":"hourly/2022-11-30:13:22","size_bytes":8797831,"size":"8.390Mi"},{"name":"hourly/2022-11-30:14","size_bytes":10680454,"size":"10.186Mi"}]
|
||||
[{"name":"daily/2023-04-07","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:07+00:00"},{"name":"hourly/2023-04-07:11","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:06+00:00"},{"name":"latest","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:04+00:00"},{"name":"monthly/2023-04","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:10+00:00"},{"name":"weekly/2023-14","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:09+00:00"}]
|
||||
```
|
||||
> Note: `created_at` field is in RFC3339 format.
|
||||
|
||||
* POST `/api/v1/restore` - saves backup name to restore when [performing restore](#restore-commands).
|
||||
Example request body:
|
||||
|
@ -211,7 +212,7 @@ It can be changed by using flag:
|
|||
`vmbackupmanager backup list` lists backups in remote storage:
|
||||
```console
|
||||
$ ./vmbackupmanager backup list
|
||||
[{"name":"daily/2022-11-30","size_bytes":26664689,"size":"25.429Mi"},{"name":"daily/2022-12-01","size_bytes":40160965,"size":"38.300Mi"},{"name":"hourly/2022-11-30:12","size_bytes":5846529,"size":"5.576Mi"},{"name":"hourly/2022-11-30:13","size_bytes":17651847,"size":"16.834Mi"},{"name":"hourly/2022-11-30:13:22","size_bytes":8797831,"size":"8.390Mi"},{"name":"hourly/2022-11-30:14","size_bytes":10680454,"size":"10.186Mi"}]
|
||||
[{"name":"daily/2023-04-07","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:07+00:00"},{"name":"hourly/2023-04-07:11","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:06+00:00"},{"name":"latest","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:04+00:00"},{"name":"monthly/2023-04","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:10+00:00"},{"name":"weekly/2023-14","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:09+00:00"}]
|
||||
```
|
||||
|
||||
### Restore commands
|
||||
|
@ -249,16 +250,16 @@ If restore mark doesn't exist at `storageDataPath`(restore wasn't requested) `vm
|
|||
1. Run `vmbackupmanager backup list` to get list of available backups:
|
||||
```console
|
||||
$ /vmbackupmanager-prod backup list
|
||||
["daily/2022-10-06","daily/2022-10-10","hourly/2022-10-04:13","hourly/2022-10-06:12","hourly/2022-10-06:13","hourly/2022-10-10:14","hourly/2022-10-10:16","monthly/2022-10","weekly/2022-40","weekly/2022-41"]
|
||||
[{"name":"daily/2023-04-07","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:07+00:00"},{"name":"hourly/2023-04-07:11","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:06+00:00"},{"name":"latest","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:04+00:00"},{"name":"monthly/2023-04","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:10+00:00"},{"name":"weekly/2023-14","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:09+00:00"}]
|
||||
```
|
||||
2. Run `vmbackupmanager restore create` to create restore mark:
|
||||
- Use relative path to backup to restore from currently used remote storage:
|
||||
```console
|
||||
$ /vmbackupmanager-prod restore create daily/2022-10-06
|
||||
$ /vmbackupmanager-prod restore create daily/2023-04-07
|
||||
```
|
||||
- Use full path to backup to restore from any remote storage:
|
||||
```console
|
||||
$ /vmbackupmanager-prod restore create azblob://test1/vmbackupmanager/daily/2022-10-06
|
||||
$ /vmbackupmanager-prod restore create azblob://test1/vmbackupmanager/daily/2023-04-07
|
||||
```
|
||||
3. Stop `vmstorage` or `vmsingle` node
|
||||
4. Run `vmbackupmanager restore` to restore backup:
|
||||
|
@@ -275,23 +276,24 @@ If restore mark doesn't exist at `storageDataPath` (restore wasn't requested) `vm
   ```yaml
   vmbackup:
     restore:
       onStart: "true"
       onStart:
         enabled: "true"
   ```
   See operator `VMStorage` schema [here](https://docs.victoriametrics.com/operator/api.html#vmstorage) and `VMSingle` [here](https://docs.victoriametrics.com/operator/api.html#vmsinglespec).
2. Enter container running `vmbackupmanager`
2. Use `vmbackupmanager backup list` to get the list of available backups:
   ```console
   $ /vmbackupmanager-prod backup list
   ["daily/2022-10-06","daily/2022-10-10","hourly/2022-10-04:13","hourly/2022-10-06:12","hourly/2022-10-06:13","hourly/2022-10-10:14","hourly/2022-10-10:16","monthly/2022-10","weekly/2022-40","weekly/2022-41"]
   [{"name":"daily/2023-04-07","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:07+00:00"},{"name":"hourly/2023-04-07:11","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:06+00:00"},{"name":"latest","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:04+00:00"},{"name":"monthly/2023-04","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:10+00:00"},{"name":"weekly/2023-14","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:09+00:00"}]
   ```
3. Use `vmbackupmanager restore create` to create restore mark:
   - Use relative path to backup to restore from currently used remote storage:
     ```console
     $ /vmbackupmanager-prod restore create daily/2022-10-06
     $ /vmbackupmanager-prod restore create daily/2023-04-07
     ```
   - Use full path to backup to restore from any remote storage:
     ```console
     $ /vmbackupmanager-prod restore create azblob://test1/vmbackupmanager/daily/2022-10-06
     $ /vmbackupmanager-prod restore create azblob://test1/vmbackupmanager/daily/2023-04-07
     ```
4. Restart pod
@@ -305,7 +307,8 @@ Clusters here are referred to as `source` and `destination`.
   ```yaml
   vmbackup:
     restore:
       onStart: "true"
       onStart:
         enabled: "true"
   ```
   Note: it is safe to leave this section in the cluster configuration, since it will be ignored if the restore mark doesn't exist.
   > Important! Use a different `-dst` for the *destination* cluster to avoid overwriting backup data of the *source* cluster.
@@ -313,13 +316,13 @@ Clusters here are referred to as `source` and `destination`.
2. Use `vmbackupmanager backup list` to get the list of available backups:
   ```console
   $ /vmbackupmanager-prod backup list
   ["daily/2022-10-06","daily/2022-10-10","hourly/2022-10-04:13","hourly/2022-10-06:12","hourly/2022-10-06:13","hourly/2022-10-10:14","hourly/2022-10-10:16","monthly/2022-10","weekly/2022-40","weekly/2022-41"]
   [{"name":"daily/2023-04-07","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:07+00:00"},{"name":"hourly/2023-04-07:11","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:06+00:00"},{"name":"latest","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:04+00:00"},{"name":"monthly/2023-04","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:10+00:00"},{"name":"weekly/2023-14","size_bytes":318837,"size":"311.4ki","created_at":"2023-04-07T16:15:09+00:00"}]
   ```
3. Use `vmbackupmanager restore create` to create restore mark at each pod of the *destination* cluster.
   Each pod in the *destination* cluster should be restored from the backup of the respective pod in the *source* cluster.
   For example: `vmstorage-destination-0` in the *destination* cluster should be restored from the backup of `vmstorage-source-0` in the *source* cluster.
   ```console
   $ /vmbackupmanager-prod restore create s3://source_cluster/vmstorage-source-0/daily/2022-10-06
   $ /vmbackupmanager-prod restore create s3://source_cluster/vmstorage-source-0/daily/2023-04-07
   ```

## Monitoring
@@ -738,7 +738,7 @@ or higher.
See `./vmctl vm-native --help` for details and the full list of flags.

Migration in `vm-native` mode takes two steps:
1. Explore the list of the metrics to migrate via `/api/v1/series` API;
1. Explore the list of the metrics to migrate via `api/v1/label/__name__/values` API;
2. Migrate explored metrics one-by-one.

```
@@ -765,6 +765,57 @@ Requests to make: 9 / 9 [██████████████████
requests retries: 0;
2023/03/02 09:22:06 Total time: 3.633127625s
```

`vmctl` uses retries with backoff policy by default.

The benefits of this retry backoff policy include:
1. Improved success rates:
   With each retry attempt, the migration process has a higher chance of success.
   By increasing the delay between retries, the system can avoid overwhelming the service with too many requests at once.

2. Reduced load on the system:
   By increasing the delay between retries, the system can reduce the load on the service by limiting the number of
   requests made in a short amount of time.
3. It can help to migrate a large amount of data.

However, there are also some potential penalties associated with using a backoff retry policy, including:
1. Increased migration process latency:
   `vmctl` needs to make an additional call to the `api/v1/label/__name__/values` API with the defined `--vm-native-filter-match` flag,
   and then process all metric names with additional filters.

If retries with backoff policy are not needed, the `--vm-native-disable-retries` command-line flag can be used.
When this flag is set to `true`, `vmctl` skips the additional call to the `api/v1/label/__name__/values` API and starts the
migration process by making calls to the `/api/v1/export` and `api/v1/import` APIs. If any error happens, `vmctl` immediately
stops the migration process.
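To make the policy concrete, here is a minimal, hypothetical sketch of such a retry loop in Go. The names are illustrative only; the real logic lives in `vmctl`'s backoff package and differs in details:

```go
package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

// retryWithBackoff retries fn until it succeeds or maxAttempts is reached,
// doubling the delay between attempts so the remote service is not
// overwhelmed by a burst of repeated requests.
func retryWithBackoff(ctx context.Context, maxAttempts int, fn func() error) error {
    delay := time.Second
    var err error
    for attempt := 1; attempt <= maxAttempts; attempt++ {
        if err = fn(); err == nil {
            return nil
        }
        if attempt == maxAttempts {
            break
        }
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(delay):
        }
        delay *= 2 // exponential backoff
    }
    return fmt.Errorf("all %d attempts failed: %w", maxAttempts, err)
}

func main() {
    calls := 0
    err := retryWithBackoff(context.Background(), 5, func() error {
        calls++
        if calls < 3 {
            return errors.New("transient error")
        }
        return nil
    })
    fmt.Println(calls, err) // 3 <nil>
}
```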
```
./vmctl vm-native --vm-native-src-addr=http://127.0.0.1:8481/select/0/prometheus \
  --vm-native-dst-addr=http://127.0.0.1:8428 \
  --vm-native-filter-match='{__name__!=""}' \
  --vm-native-filter-time-start='2023-04-08T11:30:30Z' \
  --vm-native-disable-retries=true

VictoriaMetrics Native import mode

2023/04/11 10:17:14 Initing import process from "http://127.0.0.1:8481/select/0/prometheus/api/v1/export/native" to "http://localhost:8428/api/v1/import/native" with filter
        filter: match[]={__name__!=""}
        start: 2023-04-08T11:30:30Z
. Continue? [Y/n]
2023/04/11 10:17:15 Requests to make: 1
2023/04/11 10:17:15 number of workers decreased to 1, because vmctl calculated requests to make 1
Total: 0 ↙ Speed: ? p/s Continue import process with filter
        filter: match[]={__name__!=""}
        start: 2023-04-08T11:30:30Z
        end: 2023-04-11T07:17:14Z:
Total: 1.64 GiB ↖ Speed: 11.20 MiB p/s
2023/04/11 10:19:45 Import finished!
2023/04/11 10:19:45 VictoriaMetrics importer stats:
  time spent while importing: 2m30.813841541s;
  total bytes: 1.8 GB;
  bytes/s: 11.7 MB;
  requests: 1;
  requests retries: 0;
2023/04/11 10:19:45 Total time: 2m30.814721125s
```

Importing tips:
@@ -326,6 +326,7 @@ const (
    vmNativeStepInterval = "vm-native-step-interval"

    vmNativeDisableHTTPKeepAlive = "vm-native-disable-http-keep-alive"
    vmNativeDisableRetries       = "vm-native-disable-retries"

    vmNativeSrcAddr = "vm-native-src-addr"
    vmNativeSrcUser = "vm-native-src-user"

@@ -351,12 +352,12 @@ var (
    },
    &cli.StringFlag{
        Name:  vmNativeFilterTimeStart,
        Usage: "The time filter may contain either unix timestamp in seconds or RFC3339 values. E.g. '2020-01-01T20:07:00Z'",
        Usage: "The time filter may contain different timestamp formats. See more details here https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#timestamp-formats",
        Required: true,
    },
    &cli.StringFlag{
        Name:  vmNativeFilterTimeEnd,
        Usage: "The time filter may contain either unix timestamp in seconds or RFC3339 values. E.g. '2020-01-01T20:07:00Z'",
        Usage: "The time filter may contain different timestamp formats. See more details here https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#timestamp-formats",
    },
    &cli.StringFlag{
        Name: vmNativeStepInterval,

@@ -443,6 +444,11 @@ var (
        Usage: "Number of workers concurrently performing import requests to VM",
        Value: 2,
    },
    &cli.BoolFlag{
        Name:  vmNativeDisableRetries,
        Usage: "Defines whether to disable retries with backoff policy for migration process",
        Value: false,
    },
}
)
@@ -4,6 +4,7 @@ import (
    "context"
    "fmt"
    "log"
    "net/http"
    "os"
    "os/signal"
    "strings"

@@ -201,6 +202,8 @@ func main() {
    return fmt.Errorf("flag %q can't be empty", vmNativeFilterMatch)
}

disableKeepAlive := c.Bool(vmNativeDisableHTTPKeepAlive)

var srcExtraLabels []string
srcAddr := strings.Trim(c.String(vmNativeSrcAddr), "/")
srcAuthConfig, err := auth.Generate(

@@ -210,6 +213,7 @@ func main() {
if err != nil {
    return fmt.Errorf("cannot initialize auth config for source: %s", srcAddr)
}
srcHTTPClient := &http.Client{Transport: &http.Transport{DisableKeepAlives: disableKeepAlive}}

dstAddr := strings.Trim(c.String(vmNativeDstAddr), "/")
dstExtraLabels := c.StringSlice(vmExtraLabel)

@@ -220,6 +224,7 @@ func main() {
if err != nil {
    return fmt.Errorf("cannot initialize auth config for destination: %s", dstAddr)
}
dstHTTPClient := &http.Client{Transport: &http.Transport{DisableKeepAlives: disableKeepAlive}}

p := vmNativeProcessor{
    rateLimit: c.Int64(vmRateLimit),

@@ -231,19 +236,20 @@ func main() {
        Chunk: c.String(vmNativeStepInterval),
    },
    src: &native.Client{
        AuthCfg:              srcAuthConfig,
        Addr:                 srcAddr,
        ExtraLabels:          srcExtraLabels,
        DisableHTTPKeepAlive: c.Bool(vmNativeDisableHTTPKeepAlive),
        AuthCfg:     srcAuthConfig,
        Addr:        srcAddr,
        ExtraLabels: srcExtraLabels,
        HTTPClient:  srcHTTPClient,
    },
    dst: &native.Client{
        AuthCfg:              dstAuthConfig,
        Addr:                 dstAddr,
        ExtraLabels:          dstExtraLabels,
        DisableHTTPKeepAlive: c.Bool(vmNativeDisableHTTPKeepAlive),
        AuthCfg:     dstAuthConfig,
        Addr:        dstAddr,
        ExtraLabels: dstExtraLabels,
        HTTPClient:  dstHTTPClient,
    },
    backoff: backoff.New(),
    cc:      c.Int(vmConcurrency),
    backoff:        backoff.New(),
    cc:             c.Int(vmConcurrency),
    disableRetries: c.Bool(vmNativeDisableRetries),
}
return p.run(ctx, isNonInteractive(c))
},
@@ -11,34 +11,33 @@ import (
)

const (
    nativeTenantsAddr = "admin/tenants"
    nativeSeriesAddr  = "api/v1/series"
    nameLabel         = "__name__"
    nativeTenantsAddr     = "admin/tenants"
    nativeMetricNamesAddr = "api/v1/label/__name__/values"
)

// Client is an HTTP client for exporting and importing
// time series via native protocol.
type Client struct {
    AuthCfg              *auth.Config
    Addr                 string
    ExtraLabels          []string
    DisableHTTPKeepAlive bool
    AuthCfg     *auth.Config
    Addr        string
    ExtraLabels []string
    HTTPClient  *http.Client
}

// LabelValues represents series from api/v1/series response
type LabelValues map[string]string

// Response represents response from api/v1/series
// Response represents response from api/v1/label/__name__/values
type Response struct {
    Status string        `json:"status"`
    Series []LabelValues `json:"data"`
    Status      string   `json:"status"`
    MetricNames []string `json:"data"`
}

// Explore finds series by provided filter from api/v1/series
func (c *Client) Explore(ctx context.Context, f Filter, tenantID string) (map[string]struct{}, error) {
    url := fmt.Sprintf("%s/%s", c.Addr, nativeSeriesAddr)
// Explore finds metric names by provided filter from api/v1/label/__name__/values
func (c *Client) Explore(ctx context.Context, f Filter, tenantID string) ([]string, error) {
    url := fmt.Sprintf("%s/%s", c.Addr, nativeMetricNamesAddr)
    if tenantID != "" {
        url = fmt.Sprintf("%s/select/%s/prometheus/%s", c.Addr, tenantID, nativeSeriesAddr)
        url = fmt.Sprintf("%s/select/%s/prometheus/%s", c.Addr, tenantID, nativeMetricNamesAddr)
    }
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    if err != nil {

@@ -68,21 +67,7 @@ func (c *Client) Explore(ctx context.Context, f Filter, tenantID string) (map[st
    if err := resp.Body.Close(); err != nil {
        return nil, fmt.Errorf("cannot close series response body: %s", err)
    }
    names := make(map[string]struct{})
    for _, series := range response.Series {
        // TODO: consider tweaking /api/v1/series API to return metric names only
        // this could make explore response much lighter.
        for key, value := range series {
            if key != nameLabel {
                continue
            }
            if _, ok := names[value]; ok {
                continue
            }
            names[value] = struct{}{}
        }
    }
    return names, nil
    return response.MetricNames, nil
}

// ImportPipe uses pipe reader in request to process data
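As an aside, `api/v1/label/__name__/values` is a standard VictoriaMetrics API, so the explore step can be reproduced by hand. A rough, self-contained sketch (the localhost address and the match filter are assumptions):

```go
package main

import (
    "encoding/json"
    "fmt"
    "net/http"
    "net/url"
)

func main() {
    // Ask a VictoriaMetrics instance for all metric names matching the filter.
    q := url.Values{}
    q.Set("match[]", `{__name__!=""}`)
    resp, err := http.Get("http://localhost:8428/api/v1/label/__name__/values?" + q.Encode())
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    // The body has the same shape as the Response struct above:
    // {"status":"success","data":["metric_a","metric_b",...]}
    var r struct {
        Status string   `json:"status"`
        Data   []string `json:"data"`
    }
    if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
        panic(err)
    }
    fmt.Printf("%s: %d metric names\n", r.Status, len(r.Data))
}
```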
@@ -169,8 +154,8 @@ func (c *Client) do(req *http.Request, expSC int) (*http.Response, error) {
    if c.AuthCfg != nil {
        c.AuthCfg.SetHeaders(req, true)
    }
    var httpClient = &http.Client{Transport: &http.Transport{DisableKeepAlives: c.DisableHTTPKeepAlive}}
    resp, err := httpClient.Do(req)

    resp, err := c.HTTPClient.Do(req)
    if err != nil {
        return nil, fmt.Errorf("unexpected error when performing request: %w", err)
    }
@@ -29,6 +29,11 @@ type Response struct {
    Series []LabelValues `json:"data"`
}

type MetricNamesResponse struct {
    Status string   `json:"status"`
    Data   []string `json:"data"`
}

// RemoteWriteServer represents fake remote write server with database
type RemoteWriteServer struct {
    server *httptest.Server

@@ -44,6 +49,7 @@ func NewRemoteWriteServer(t *testing.T) *RemoteWriteServer {
    mux.Handle("/api/v1/import", rws.getWriteHandler(t))
    mux.Handle("/health", rws.handlePing())
    mux.Handle("/api/v1/series", rws.seriesHandler())
    mux.Handle("/api/v1/label/__name__/values", rws.valuesHandler())
    mux.Handle("/api/v1/export/native", rws.exportNativeHandler())
    mux.Handle("/api/v1/import/native", rws.importNativeHandler(t))
    rws.server = httptest.NewServer(mux)

@@ -145,6 +151,36 @@ func (rws *RemoteWriteServer) seriesHandler() http.Handler {
    })
}

func (rws *RemoteWriteServer) valuesHandler() http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        labelNames := make(map[string]struct{})
        for _, ser := range rws.series {
            if ser.Name != "" {
                labelNames[ser.Name] = struct{}{}
            }
        }

        metricNames := make([]string, 0, len(labelNames))
        for k := range labelNames {
            metricNames = append(metricNames, k)
        }
        resp := MetricNamesResponse{
            Status: "success",
            Data:   metricNames,
        }

        err := json.NewEncoder(w).Encode(resp)
        if err != nil {
            log.Printf("error sending series: %s", err)
            w.WriteHeader(http.StatusInternalServerError)
            return
        }

        w.WriteHeader(http.StatusNoContent)
        return
    })
}

func (rws *RemoteWriteServer) exportNativeHandler() http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        now := time.Now()

31
app/vmctl/utils/time.go
Normal file
@@ -0,0 +1,31 @@
package utils

import (
    "fmt"
    "time"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

const (
    // These values prevent overflow when storing msec-precision time in int64.
    minTimeMsecs = 0 // use 0 instead of `int64(-1<<63) / 1e6` because the storage engine doesn't actually support negative time
    maxTimeMsecs = int64(1<<63-1) / 1e6
)

// GetTime returns time from the given string.
func GetTime(s string) (time.Time, error) {
    secs, err := promutils.ParseTime(s)
    if err != nil {
        return time.Time{}, fmt.Errorf("cannot parse %s: %w", s, err)
    }
    msecs := int64(secs * 1e3)
    if msecs < minTimeMsecs {
        msecs = 0
    }
    if msecs > maxTimeMsecs {
        msecs = maxTimeMsecs
    }

    return time.Unix(0, msecs*int64(time.Millisecond)).UTC(), nil
}
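For a quick feel of the accepted formats, here is a hedged usage sketch of the new helper. The import path assumes the VictoriaMetrics module layout, and the duration case depends on the current time:

```go
package main

import (
    "fmt"

    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/utils"
)

func main() {
    // All of these formats are handled by promutils.ParseTime under the hood.
    for _, s := range []string{
        "2023-04-08T11:30:30Z", // RFC3339
        "2023-04",              // year and month
        "1680953430",           // unix timestamp in seconds
        "1h5m",                 // duration relative to the current time
    } {
        t, err := utils.GetTime(s)
        fmt.Println(s, "=>", t, err)
    }
}
```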
182
app/vmctl/utils/time_test.go
Normal file
@@ -0,0 +1,182 @@
package utils

import (
    "testing"
    "time"
)

func TestGetTime(t *testing.T) {
    l, _ := time.LoadLocation("UTC")
    tests := []struct {
        name    string
        s       string
        want    func() time.Time
        wantErr bool
    }{
        {
            name:    "empty string",
            s:       "",
            want:    func() time.Time { return time.Time{} },
            wantErr: true,
        },
        {
            name: "only year",
            s:    "2019",
            want: func() time.Time {
                t := time.Date(2019, 1, 1, 0, 0, 0, 0, l)
                return t
            },
        },
        {
            name: "year and month",
            s:    "2019-01",
            want: func() time.Time {
                t := time.Date(2019, 1, 1, 0, 0, 0, 0, l)
                return t
            },
        },
        {
            name: "year and not first month",
            s:    "2019-02",
            want: func() time.Time {
                t := time.Date(2019, 2, 1, 0, 0, 0, 0, l)
                return t
            },
        },
        {
            name: "year, month and day",
            s:    "2019-02-01",
            want: func() time.Time {
                t := time.Date(2019, 2, 1, 0, 0, 0, 0, l)
                return t
            },
        },
        {
            name: "year, month and not first day",
            s:    "2019-02-10",
            want: func() time.Time {
                t := time.Date(2019, 2, 10, 0, 0, 0, 0, l)
                return t
            },
        },
        {
            name: "year, month, day and time",
            s:    "2019-02-02T00",
            want: func() time.Time {
                t := time.Date(2019, 2, 2, 0, 0, 0, 0, l)
                return t
            },
        },
        {
            name: "year, month, day and one hour time",
            s:    "2019-02-02T01",
            want: func() time.Time {
                t := time.Date(2019, 2, 2, 1, 0, 0, 0, l)
                return t
            },
        },
        {
            name: "time with zero minutes",
            s:    "2019-02-02T01:00",
            want: func() time.Time {
                t := time.Date(2019, 2, 2, 1, 0, 0, 0, l)
                return t
            },
        },
        {
            name: "time with one minute",
            s:    "2019-02-02T01:01",
            want: func() time.Time {
                t := time.Date(2019, 2, 2, 1, 1, 0, 0, l)
                return t
            },
        },
        {
            name: "time with zero seconds",
            s:    "2019-02-02T01:01:00",
            want: func() time.Time {
                t := time.Date(2019, 2, 2, 1, 1, 0, 0, l)
                return t
            },
        },
        {
            name: "time with one second",
            s:    "2019-02-02T01:01:01",
            want: func() time.Time {
                t := time.Date(2019, 2, 2, 1, 1, 1, 0, l)
                return t
            },
        },
        {
            name: "time with two seconds and timezone",
            s:    "2019-07-07T20:01:02Z",
            want: func() time.Time {
                t := time.Date(2019, 7, 7, 20, 1, 02, 0, l)
                return t
            },
        },
        {
            name: "time with seconds and timezone",
            s:    "2019-07-07T20:47:40+03:00",
            want: func() time.Time {
                l, _ = time.LoadLocation("Europe/Kiev")
                t := time.Date(2019, 7, 7, 20, 47, 40, 0, l)
                return t
            },
        },
        {
            name:    "negative time",
            s:       "-292273086-05-16T16:47:06Z",
            want:    func() time.Time { return time.Time{} },
            wantErr: true,
        },
        {
            name: "float timestamp representation",
            s:    "1562529662.324",
            want: func() time.Time {
                t := time.Date(2019, 7, 7, 23, 01, 02, 324, l)
                return t
            },
        },
        {
            name: "negative timestamp",
            s:    "-9223372036.855",
            want: func() time.Time {
                l, _ = time.LoadLocation("Europe/Kiev")
                return time.Date(1970, 01, 01, 03, 00, 00, 00, l)
            },
            wantErr: false,
        },
        {
            name: "big timestamp",
            s:    "9223372036.855",
            want: func() time.Time {
                l, _ = time.LoadLocation("Europe/Kiev")
                t := time.Date(2262, 04, 12, 02, 47, 16, 855, l)
                return t
            },
            wantErr: false,
        },
        {
            name: "duration time",
            s:    "1h5m",
            want: func() time.Time {
                t := time.Now().Add(-1 * time.Hour).Add(-5 * time.Minute)
                return t
            },
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := GetTime(tt.s)
            if (err != nil) != tt.wantErr {
                t.Errorf("ParseTime() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            w := tt.want()
            if got.Unix() != w.Unix() {
                t.Errorf("ParseTime() got = %v, want %v", got, w)
            }
        })
    }
}
@@ -5,6 +5,7 @@ import (
    "fmt"
    "io"
    "log"
    "strings"
    "sync"
    "time"

@@ -12,9 +13,10 @@ import (
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/limiter"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/native"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/utils"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
    "github.com/cheggaaa/pb/v3"
)

@@ -25,16 +27,18 @@ type vmNativeProcessor struct {
    src     *native.Client
    backoff *backoff.Backoff

    s            *stats
    rateLimit    int64
    interCluster bool
    cc           int
    s              *stats
    rateLimit      int64
    interCluster   bool
    cc             int
    disableRetries bool
}

const (
    nativeExportAddr = "api/v1/export/native"
    nativeImportAddr = "api/v1/import/native"
    nativeBarTpl     = `{{ blue "%s:" }} {{ counters . }} {{ bar . "[" "█" (cycle . "█") "▒" "]" }} {{ percent . }}`
    nativeExportAddr       = "api/v1/export/native"
    nativeImportAddr       = "api/v1/import/native"
    nativeWithBackoffTpl   = `{{ blue "%s:" }} {{ counters . }} {{ bar . "[" "█" (cycle . "█") "▒" "]" }} {{ percent . }}`
    nativeSingleProcessTpl = `Total: {{counters . }} {{ cycle . "↖" "↗" "↘" "↙" }} Speed: {{speed . }} {{string . "suffix"}}`
)

func (p *vmNativeProcessor) run(ctx context.Context, silent bool) error {

@@ -45,18 +49,16 @@ func (p *vmNativeProcessor) run(ctx context.Context, silent bool) error {
        startTime: time.Now(),
    }

    start, err := time.Parse(time.RFC3339, p.filter.TimeStart)
    start, err := utils.GetTime(p.filter.TimeStart)
    if err != nil {
        return fmt.Errorf("failed to parse %s, provided: %s, expected format: %s, error: %w",
            vmNativeFilterTimeStart, p.filter.TimeStart, time.RFC3339, err)
        return fmt.Errorf("failed to parse %s, provided: %s, error: %w", vmNativeFilterTimeStart, p.filter.TimeStart, err)
    }

    end := time.Now().In(start.Location())
    if p.filter.TimeEnd != "" {
        end, err = time.Parse(time.RFC3339, p.filter.TimeEnd)
        end, err = utils.GetTime(p.filter.TimeEnd)
        if err != nil {
            return fmt.Errorf("failed to parse %s, provided: %s, expected format: %s, error: %w",
                vmNativeFilterTimeEnd, p.filter.TimeEnd, time.RFC3339, err)
            return fmt.Errorf("failed to parse %s, provided: %s, error: %w", vmNativeFilterTimeEnd, p.filter.TimeEnd, err)
        }
    }
@@ -94,9 +96,9 @@ func (p *vmNativeProcessor) run(ctx context.Context, silent bool) error {
    return nil
}

func (p *vmNativeProcessor) do(ctx context.Context, f native.Filter, srcURL, dstURL string) error {
func (p *vmNativeProcessor) do(ctx context.Context, f native.Filter, srcURL, dstURL string, bar *pb.ProgressBar) error {

    retryableFunc := func() error { return p.runSingle(ctx, f, srcURL, dstURL) }
    retryableFunc := func() error { return p.runSingle(ctx, f, srcURL, dstURL, bar) }
    attempts, err := p.backoff.Retry(ctx, retryableFunc)
    p.s.Lock()
    p.s.retries += attempts

@@ -108,13 +110,18 @@ func (p *vmNativeProcessor) do(ctx context.Context, f native.Filter, srcURL, dst
    return nil
}

func (p *vmNativeProcessor) runSingle(ctx context.Context, f native.Filter, srcURL, dstURL string) error {
func (p *vmNativeProcessor) runSingle(ctx context.Context, f native.Filter, srcURL, dstURL string, bar *pb.ProgressBar) error {

    exportReader, err := p.src.ExportPipe(ctx, srcURL, f)
    reader, err := p.src.ExportPipe(ctx, srcURL, f)
    if err != nil {
        return fmt.Errorf("failed to init export pipe: %w", err)
    }

    if p.disableRetries && bar != nil {
        fmt.Printf("Continue import process with filter %s:\n", f.String())
        reader = bar.NewProxyReader(reader)
    }

    pr, pw := io.Pipe()
    done := make(chan struct{})
    go func() {

@@ -131,7 +138,7 @@ func (p *vmNativeProcessor) runSingle(ctx context.Context, f native.Filter, srcU
        w = limiter.NewWriteLimiter(pw, rl)
    }

    written, err := io.Copy(w, exportReader)
    written, err := io.Copy(w, reader)
    if err != nil {
        return fmt.Errorf("failed to write into %q: %s", p.dst.Addr, err)
    }
@@ -176,17 +183,22 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
    fmt.Println("") // extra line for better output formatting
    log.Printf(initMessage, initParams...)

    log.Printf("Exploring metrics...")
    metrics, err := p.src.Explore(ctx, p.filter, tenantID)
    if err != nil {
        return fmt.Errorf("cannot get metrics from source %s: %w", p.src.Addr, err)
    var foundSeriesMsg string

    metrics := []string{p.filter.Match}
    if !p.disableRetries {
        log.Printf("Exploring metrics...")
        metrics, err = p.src.Explore(ctx, p.filter, tenantID)
        if err != nil {
            return fmt.Errorf("cannot get metrics from source %s: %w", p.src.Addr, err)
        }

        if len(metrics) == 0 {
            return fmt.Errorf("no metrics found")
        }
        foundSeriesMsg = fmt.Sprintf("Found %d metrics to import", len(metrics))
    }

    if len(metrics) == 0 {
        return fmt.Errorf("no metrics found")
    }

    foundSeriesMsg := fmt.Sprintf("Found %d metrics to import", len(metrics))
    if !p.interCluster {
        // do not prompt for intercluster because there could be many tenants,
        // and we don't want to interrupt the process when moving to the next tenant.

@@ -206,7 +218,10 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,

    var bar *pb.ProgressBar
    if !silent {
        bar = pb.ProgressBarTemplate(fmt.Sprintf(nativeBarTpl, barPrefix)).New(len(metrics) * len(ranges))
        bar = pb.ProgressBarTemplate(fmt.Sprintf(nativeWithBackoffTpl, barPrefix)).New(len(metrics) * len(ranges))
        if p.disableRetries {
            bar = pb.ProgressBarTemplate(nativeSingleProcessTpl).New(0)
        }
        bar.Start()
        defer bar.Finish()
    }

@@ -220,19 +235,26 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
    go func() {
        defer wg.Done()
        for f := range filterCh {
            if err := p.do(ctx, f, srcURL, dstURL); err != nil {
                errCh <- err
                return
            }
            if bar != nil {
                bar.Increment()
            if !p.disableRetries {
                if err := p.do(ctx, f, srcURL, dstURL, nil); err != nil {
                    errCh <- err
                    return
                }
                if bar != nil {
                    bar.Increment()
                }
            } else {
                if err := p.runSingle(ctx, f, srcURL, dstURL, bar); err != nil {
                    errCh <- err
                    return
                }
            }
        }
    }()
}

// any error breaks the import
for s := range metrics {
for _, s := range metrics {

    match, err := buildMatchWithFilter(p.filter.Match, s)
    if err != nil {

@@ -313,11 +335,26 @@ func byteCountSI(b int64) string {
}

func buildMatchWithFilter(filter string, metricName string) (string, error) {
    labels, err := promutils.NewLabelsFromString(filter)
    if filter == metricName {
        return filter, nil
    }

    labels, err := searchutils.ParseMetricSelector(filter)
    if err != nil {
        return "", err
    }
    labels.Set("__name__", metricName)

    return labels.String(), nil
    str := make([]string, 0, len(labels))
    for _, label := range labels {
        if len(label.Key) == 0 {
            continue
        }
        str = append(str, label.String())
    }

    nameFilter := fmt.Sprintf("__name__=%q", metricName)
    str = append(str, nameFilter)

    match := fmt.Sprintf("{%s}", strings.Join(str, ","))
    return match, nil
}
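To make the rewritten helper's behavior concrete, here is a hypothetical example test. The function is unexported, so this only works inside the same package; the name and placement are illustrative, and the expectation mirrors the test table further down:

```go
package main

import "testing"

// TestBuildMatchWithFilterExample is an illustrative test (hypothetical name):
// any __name__ matcher in the filter is replaced by an exact match on the
// explored metric name, while other label filters are preserved.
func TestBuildMatchWithFilterExample(t *testing.T) {
    match, err := buildMatchWithFilter(`{cluster=~"kube.*", __name__!=""}`, "http_request_count_total")
    if err != nil {
        t.Fatal(err)
    }
    want := `{cluster=~"kube.*",__name__="http_request_count_total"}`
    if match != want {
        t.Fatalf("got %s, want %s", match, want)
    }
}
```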
@@ -5,6 +5,7 @@ import (
    "flag"
    "fmt"
    "log"
    "net/http"
    "os"
    "testing"
    "time"

@@ -191,7 +192,7 @@ func Test_vmNativeProcessor_run(t *testing.T) {
    t.Fatalf("Error parse end time: %s", err)
}

tt.fields.filter.Match = fmt.Sprintf("%s=%q", tt.fields.matchName, tt.fields.matchValue)
tt.fields.filter.Match = fmt.Sprintf("{%s=~%q}", tt.fields.matchName, tt.fields.matchValue)
tt.fields.filter.TimeStart = tt.start
tt.fields.filter.TimeEnd = tt.end

@@ -205,16 +206,16 @@ func Test_vmNativeProcessor_run(t *testing.T) {
}

tt.fields.src = &native.Client{
    AuthCfg:              nil,
    Addr:                 src.URL(),
    ExtraLabels:          []string{},
    DisableHTTPKeepAlive: false,
    AuthCfg:     nil,
    Addr:        src.URL(),
    ExtraLabels: []string{},
    HTTPClient:  &http.Client{Transport: &http.Transport{DisableKeepAlives: false}},
}
tt.fields.dst = &native.Client{
    AuthCfg:              nil,
    Addr:                 dst.URL(),
    ExtraLabels:          []string{},
    DisableHTTPKeepAlive: false,
    AuthCfg:     nil,
    Addr:        dst.URL(),
    ExtraLabels: []string{},
    HTTPClient:  &http.Client{Transport: &http.Transport{DisableKeepAlives: false}},
}

p := &vmNativeProcessor{

@@ -307,44 +308,72 @@ func Test_buildMatchWithFilter(t *testing.T) {
    name:       "parsed metric with label",
    filter:     `{__name__="http_request_count_total",cluster="kube1"}`,
    metricName: "http_request_count_total",
    want:       `{__name__="http_request_count_total",cluster="kube1"}`,
    want:       `{cluster="kube1",__name__="http_request_count_total"}`,
    wantErr:    false,
},
{
    name:       "metric name with label",
    filter:     `http_request_count_total{cluster="kube1"}`,
    metricName: "http_request_count_total",
    want:       `{__name__="http_request_count_total",cluster="kube1"}`,
    want:       `{cluster="kube1",__name__="http_request_count_total"}`,
    wantErr:    false,
},
{
    name:       "parsed metric with regexp value",
    filter:     `{__name__="http_request_count_total",cluster~="kube.*"}`,
    filter:     `{__name__="http_request_count_total",cluster=~"kube.*"}`,
    metricName: "http_request_count_total",
    want:       `{__name__="http_request_count_total",cluster~="kube.*"}`,
    want:       `{cluster=~"kube.*",__name__="http_request_count_total"}`,
    wantErr:    false,
},
{
    name:       "only label with regexp",
    filter:     `{cluster~=".*"}`,
    filter:     `{cluster=~".*"}`,
    metricName: "http_request_count_total",
    want:       `{cluster~=".*",__name__="http_request_count_total"}`,
    want:       `{cluster=~".*",__name__="http_request_count_total"}`,
    wantErr:    false,
},
{
    name:       "many labels in filter with regexp",
    filter:     `{cluster~=".*",job!=""}`,
    filter:     `{cluster=~".*",job!=""}`,
    metricName: "http_request_count_total",
    want:       `{cluster~=".*",job!="",__name__="http_request_count_total"}`,
    want:       `{cluster=~".*",job!="",__name__="http_request_count_total"}`,
    wantErr:    false,
},
{
    name:       "match with error",
    filter:     `{cluster=~".*"}`,
    filter:     `{cluster~=".*"}`,
    metricName: "http_request_count_total",
    want:       ``,
    wantErr:    true,
},
{
    name:       "all names",
    filter:     `{__name__!=""}`,
    metricName: "http_request_count_total",
    want:       `{__name__="http_request_count_total"}`,
    wantErr:    false,
},
{
    name:       "with many underscores labels",
    filter:     `{__name__!="", __meta__!=""}`,
    metricName: "http_request_count_total",
    want:       `{__meta__!="",__name__="http_request_count_total"}`,
    wantErr:    false,
},
{
    name:       "metric name has regexp",
    filter:     `{__name__=~".*"}`,
    metricName: "http_request_count_total",
    want:       `{__name__="http_request_count_total"}`,
    wantErr:    false,
},
{
    name:       "metric name has negative regexp",
    filter:     `{__name__!~".*"}`,
    metricName: "http_request_count_total",
    want:       `{__name__="http_request_count_total"}`,
    wantErr:    false,
},
}
for _, tt := range tests {
    t.Run(tt.name, func(t *testing.T) {
@@ -174,6 +174,7 @@ func pushAggregateSeries(tss []prompbmarshal.TimeSeries) {
    ctx.skipStreamAggr = true
    for _, ts := range tss {
        labels := ts.Labels
        ctx.Labels = ctx.Labels[:0]
        for _, label := range labels {
            name := label.Name
            if name == "__name__" {
@@ -115,6 +115,13 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
        timerpool.Put(t)
        qt.Printf("wait in queue because -search.maxConcurrentRequests=%d concurrent requests are executed", *maxConcurrentRequests)
        defer func() { <-concurrencyLimitCh }()
    case <-r.Context().Done():
        timerpool.Put(t)
        remoteAddr := httpserver.GetQuotedRemoteAddr(r)
        requestURI := httpserver.GetRequestURI(r)
        logger.Infof("client has cancelled the request after %.3f seconds: remoteAddr=%s, requestURI: %q",
            d.Seconds(), remoteAddr, requestURI)
        return true
    case <-t.C:
        timerpool.Put(t)
        concurrencyLimitTimeout.Inc()
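The hunk above races a semaphore channel against the client's context and a wait timer. A generalized, standalone sketch of that pattern (the names are illustrative, not the actual vmselect code):

```go
package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

var errTimeout = errors.New("timed out waiting for a slot")

// acquire waits for a free slot in sem, giving up when the caller's context
// is canceled or maxWait elapses - the same three-way select as above.
func acquire(ctx context.Context, sem chan struct{}, maxWait time.Duration) (release func(), err error) {
    t := time.NewTimer(maxWait)
    defer t.Stop()
    select {
    case sem <- struct{}{}:
        return func() { <-sem }, nil
    case <-ctx.Done():
        return nil, ctx.Err()
    case <-t.C:
        return nil, errTimeout
    }
}

func main() {
    sem := make(chan struct{}, 2) // at most 2 concurrent requests
    release, err := acquire(context.Background(), sem, time.Second)
    if err != nil {
        fmt.Println(err)
        return
    }
    defer release()
    fmt.Println("slot acquired")
}
```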
@@ -3,6 +3,7 @@ package netstorage
import (
    "fmt"
    "os"
    "path/filepath"
    "sync"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"

@@ -20,11 +21,9 @@ func InitTmpBlocksDir(tmpDirPath string) {
    if len(tmpDirPath) == 0 {
        tmpDirPath = os.TempDir()
    }
    tmpBlocksDir = tmpDirPath + "/searchResults"
    tmpBlocksDir = filepath.Join(tmpDirPath, "searchResults")
    fs.MustRemoveAll(tmpBlocksDir)
    if err := fs.MkdirAllIfNotExist(tmpBlocksDir); err != nil {
        logger.Panicf("FATAL: cannot create %q: %s", tmpBlocksDir, err)
    }
    fs.MustMkdirIfNotExist(tmpBlocksDir)
}

var tmpBlocksDir string
@@ -1,4 +1,5 @@
{% import (
    "fmt"
    "github.com/VictoriaMetrics/metricsql"
) %}

@@ -53,6 +54,28 @@ textarea { margin: 1em }
{% endif %}
{% endfunc %}

{% func ExpandWithExprsJSONResponse(q string) %}
    {% if len(q) == 0 %}
        {
            "status": "error",
            "error": "query string cannot be empty"
        }
        {% return %}
    {% endif %}

    {
    {% code expr, err := metricsql.Parse(q) %}
    {% if err != nil %}
        "status": "error",
        "error": {%q= fmt.Sprintf("Cannot parse query: %s", err) %}
    {% else %}
        {% code expr = metricsql.Optimize(expr) %}
        "status": "success",
        "expr": {%qz= expr.AppendString(nil) %}
    {% endif %}
    }
{% endfunc %}

{% endstripspace %}

{% func withExprsTutorial() %}
@@ -6,127 +6,191 @@ package prometheus

//line app/vmselect/prometheus/expand-with-exprs.qtpl:1
import (
    "fmt"
    "github.com/VictoriaMetrics/metricsql"
)

// ExpandWithExprsResponse returns a webpage, which expands with templates in q MetricsQL.

//line app/vmselect/prometheus/expand-with-exprs.qtpl:8
//line app/vmselect/prometheus/expand-with-exprs.qtpl:9
import (
    qtio422016 "io"

    qt422016 "github.com/valyala/quicktemplate"
)

//line app/vmselect/prometheus/expand-with-exprs.qtpl:8
//line app/vmselect/prometheus/expand-with-exprs.qtpl:9
var (
    _ = qtio422016.Copy
    _ = qt422016.AcquireByteBuffer
)

//line app/vmselect/prometheus/expand-with-exprs.qtpl:8
//line app/vmselect/prometheus/expand-with-exprs.qtpl:9
func StreamExpandWithExprsResponse(qw422016 *qt422016.Writer, q string) {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:8
//line app/vmselect/prometheus/expand-with-exprs.qtpl:9
    qw422016.N().S(`<html><head><title>Expand WITH expressions</title><style>p { font-weight: bold }textarea { margin: 1em }</style></head><body><div><form method="get"><div><p><a href="https://docs.victoriametrics.com/MetricsQL.html">MetricsQL</a> query with optional WITH expressions:</p><textarea name="query" style="height: 15em; width: 90%">`)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:25
//line app/vmselect/prometheus/expand-with-exprs.qtpl:26
    qw422016.E().S(q)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:25
//line app/vmselect/prometheus/expand-with-exprs.qtpl:26
    qw422016.N().S(`</textarea><br/><input type="submit" value="Expand" /><p><a href="https://docs.victoriametrics.com/MetricsQL.html">MetricsQL</a> query after expanding WITH expressions and applying other optimizations:</p><textarea style="height: 5em; width: 90%" readonly="readonly">`)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:31
//line app/vmselect/prometheus/expand-with-exprs.qtpl:32
    streamexpandWithExprs(qw422016, q)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:31
//line app/vmselect/prometheus/expand-with-exprs.qtpl:32
    qw422016.N().S(`</textarea></div></form></div><div>`)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:36
//line app/vmselect/prometheus/expand-with-exprs.qtpl:37
    streamwithExprsTutorial(qw422016)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:36
//line app/vmselect/prometheus/expand-with-exprs.qtpl:37
    qw422016.N().S(`</div></body></html>`)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:40
//line app/vmselect/prometheus/expand-with-exprs.qtpl:41
}

//line app/vmselect/prometheus/expand-with-exprs.qtpl:40
//line app/vmselect/prometheus/expand-with-exprs.qtpl:41
func WriteExpandWithExprsResponse(qq422016 qtio422016.Writer, q string) {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:40
//line app/vmselect/prometheus/expand-with-exprs.qtpl:41
    qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:40
//line app/vmselect/prometheus/expand-with-exprs.qtpl:41
    StreamExpandWithExprsResponse(qw422016, q)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:40
//line app/vmselect/prometheus/expand-with-exprs.qtpl:41
    qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:40
//line app/vmselect/prometheus/expand-with-exprs.qtpl:41
}

//line app/vmselect/prometheus/expand-with-exprs.qtpl:40
//line app/vmselect/prometheus/expand-with-exprs.qtpl:41
func ExpandWithExprsResponse(q string) string {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:40
//line app/vmselect/prometheus/expand-with-exprs.qtpl:41
    qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/expand-with-exprs.qtpl:40
//line app/vmselect/prometheus/expand-with-exprs.qtpl:41
    WriteExpandWithExprsResponse(qb422016, q)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:40
//line app/vmselect/prometheus/expand-with-exprs.qtpl:41
    qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:40
//line app/vmselect/prometheus/expand-with-exprs.qtpl:41
    qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:40
//line app/vmselect/prometheus/expand-with-exprs.qtpl:41
    return qs422016
//line app/vmselect/prometheus/expand-with-exprs.qtpl:40
//line app/vmselect/prometheus/expand-with-exprs.qtpl:41
}

//line app/vmselect/prometheus/expand-with-exprs.qtpl:42
func streamexpandWithExprs(qw422016 *qt422016.Writer, q string) {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:43
    if len(q) == 0 {
func streamexpandWithExprs(qw422016 *qt422016.Writer, q string) {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:44
        return
    if len(q) == 0 {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:45
        return
//line app/vmselect/prometheus/expand-with-exprs.qtpl:46
    }
//line app/vmselect/prometheus/expand-with-exprs.qtpl:47
//line app/vmselect/prometheus/expand-with-exprs.qtpl:48
    expr, err := metricsql.Parse(q)

//line app/vmselect/prometheus/expand-with-exprs.qtpl:48
    if err != nil {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:48
    qw422016.N().S(`Cannot parse query:`)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:49
    qw422016.E().V(err)
    if err != nil {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:49
        qw422016.N().S(`Cannot parse query:`)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:50
    } else {
        qw422016.E().V(err)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:51
    } else {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:52
        expr = metricsql.Optimize(expr)

//line app/vmselect/prometheus/expand-with-exprs.qtpl:52
    qw422016.E().Z(expr.AppendString(nil))
//line app/vmselect/prometheus/expand-with-exprs.qtpl:53
        qw422016.E().Z(expr.AppendString(nil))
//line app/vmselect/prometheus/expand-with-exprs.qtpl:54
    }
//line app/vmselect/prometheus/expand-with-exprs.qtpl:54
//line app/vmselect/prometheus/expand-with-exprs.qtpl:55
}

//line app/vmselect/prometheus/expand-with-exprs.qtpl:54
//line app/vmselect/prometheus/expand-with-exprs.qtpl:55
func writeexpandWithExprs(qq422016 qtio422016.Writer, q string) {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:54
//line app/vmselect/prometheus/expand-with-exprs.qtpl:55
    qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:54
//line app/vmselect/prometheus/expand-with-exprs.qtpl:55
    streamexpandWithExprs(qw422016, q)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:54
//line app/vmselect/prometheus/expand-with-exprs.qtpl:55
    qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:54
//line app/vmselect/prometheus/expand-with-exprs.qtpl:55
}

//line app/vmselect/prometheus/expand-with-exprs.qtpl:54
//line app/vmselect/prometheus/expand-with-exprs.qtpl:55
func expandWithExprs(q string) string {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:54
//line app/vmselect/prometheus/expand-with-exprs.qtpl:55
    qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/expand-with-exprs.qtpl:54
//line app/vmselect/prometheus/expand-with-exprs.qtpl:55
    writeexpandWithExprs(qb422016, q)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:54
//line app/vmselect/prometheus/expand-with-exprs.qtpl:55
    qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:54
//line app/vmselect/prometheus/expand-with-exprs.qtpl:55
    qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:54
//line app/vmselect/prometheus/expand-with-exprs.qtpl:55
    return qs422016
//line app/vmselect/prometheus/expand-with-exprs.qtpl:54
//line app/vmselect/prometheus/expand-with-exprs.qtpl:55
}

//line app/vmselect/prometheus/expand-with-exprs.qtpl:57
func StreamExpandWithExprsJSONResponse(qw422016 *qt422016.Writer, q string) {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:58
    if len(q) == 0 {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:58
        qw422016.N().S(`{"status": "error","error": "query string cannot be empty"}`)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:63
        return
//line app/vmselect/prometheus/expand-with-exprs.qtpl:64
    }
//line app/vmselect/prometheus/expand-with-exprs.qtpl:64
    qw422016.N().S(`{`)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:67
    expr, err := metricsql.Parse(q)

//line app/vmselect/prometheus/expand-with-exprs.qtpl:68
    if err != nil {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:68
        qw422016.N().S(`"status": "error","error":`)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:70
        qw422016.N().Q(fmt.Sprintf("Cannot parse query: %s", err))
//line app/vmselect/prometheus/expand-with-exprs.qtpl:71
    } else {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:72
        expr = metricsql.Optimize(expr)

//line app/vmselect/prometheus/expand-with-exprs.qtpl:72
        qw422016.N().S(`"status": "success","expr":`)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:74
        qw422016.N().QZ(expr.AppendString(nil))
//line app/vmselect/prometheus/expand-with-exprs.qtpl:75
    }
//line app/vmselect/prometheus/expand-with-exprs.qtpl:75
    qw422016.N().S(`}`)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:77
}

//line app/vmselect/prometheus/expand-with-exprs.qtpl:77
func WriteExpandWithExprsJSONResponse(qq422016 qtio422016.Writer, q string) {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:77
    qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:77
    StreamExpandWithExprsJSONResponse(qw422016, q)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:77
    qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:77
}

//line app/vmselect/prometheus/expand-with-exprs.qtpl:77
func ExpandWithExprsJSONResponse(q string) string {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:77
    qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/expand-with-exprs.qtpl:77
    WriteExpandWithExprsJSONResponse(qb422016, q)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:77
    qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:77
    qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:77
    return qs422016
//line app/vmselect/prometheus/expand-with-exprs.qtpl:77
}

//line app/vmselect/prometheus/expand-with-exprs.qtpl:81
func streamwithExprsTutorial(qw422016 *qt422016.Writer) {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:58
//line app/vmselect/prometheus/expand-with-exprs.qtpl:81
    qw422016.N().S(`
<h3>Tutorial for WITH expressions in <a href="https://docs.victoriametrics.com/MetricsQL.html">MetricsQL</a></h3>

@@ -315,31 +379,31 @@ WITH (
</pre>

`)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:245
//line app/vmselect/prometheus/expand-with-exprs.qtpl:268
}

//line app/vmselect/prometheus/expand-with-exprs.qtpl:245
//line app/vmselect/prometheus/expand-with-exprs.qtpl:268
func writewithExprsTutorial(qq422016 qtio422016.Writer) {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:245
//line app/vmselect/prometheus/expand-with-exprs.qtpl:268
    qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:245
//line app/vmselect/prometheus/expand-with-exprs.qtpl:268
    streamwithExprsTutorial(qw422016)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:245
//line app/vmselect/prometheus/expand-with-exprs.qtpl:268
    qt422016.ReleaseWriter(qw422016)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:245
//line app/vmselect/prometheus/expand-with-exprs.qtpl:268
}

//line app/vmselect/prometheus/expand-with-exprs.qtpl:245
//line app/vmselect/prometheus/expand-with-exprs.qtpl:268
func withExprsTutorial() string {
//line app/vmselect/prometheus/expand-with-exprs.qtpl:245
//line app/vmselect/prometheus/expand-with-exprs.qtpl:268
    qb422016 := qt422016.AcquireByteBuffer()
//line app/vmselect/prometheus/expand-with-exprs.qtpl:245
//line app/vmselect/prometheus/expand-with-exprs.qtpl:268
    writewithExprsTutorial(qb422016)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:245
//line app/vmselect/prometheus/expand-with-exprs.qtpl:268
    qs422016 := string(qb422016.B)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:245
//line app/vmselect/prometheus/expand-with-exprs.qtpl:268
    qt422016.ReleaseByteBuffer(qb422016)
//line app/vmselect/prometheus/expand-with-exprs.qtpl:245
//line app/vmselect/prometheus/expand-with-exprs.qtpl:268
    return qs422016
//line app/vmselect/prometheus/expand-with-exprs.qtpl:245
//line app/vmselect/prometheus/expand-with-exprs.qtpl:268
}
@@ -61,9 +61,16 @@ const defaultStep = 5 * 60 * 1000
// ExpandWithExprs handles the request to /expand-with-exprs
func ExpandWithExprs(w http.ResponseWriter, r *http.Request) {
    query := r.FormValue("query")
    format := r.FormValue("format")
    bw := bufferedwriter.Get(w)
    defer bufferedwriter.Put(bw)
    WriteExpandWithExprsResponse(bw, query)
    if format == "json" {
        w.Header().Set("Content-Type", "application/json")
        httpserver.EnableCORS(w, r)
        WriteExpandWithExprsJSONResponse(bw, query)
    } else {
        WriteExpandWithExprsResponse(bw, query)
    }
    _ = bw.Flush()
}
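With this change, the expanded query becomes machine-readable. A small client sketch against the new JSON mode (the address is an assumption; for the sample query the expected expansion is `foo * 2`):

```go
package main

import (
    "encoding/json"
    "fmt"
    "net/http"
    "net/url"
)

func main() {
    q := url.Values{}
    q.Set("query", `WITH (f(x) = x * 2) f(foo)`)
    q.Set("format", "json")
    resp, err := http.Get("http://localhost:8428/expand-with-exprs?" + q.Encode())
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    // Mirrors the JSON emitted by ExpandWithExprsJSONResponse above.
    var r struct {
        Status string `json:"status"`
        Expr   string `json:"expr"`
        Error  string `json:"error"`
    }
    if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
        panic(err)
    }
    fmt.Println(r.Status, r.Expr, r.Error)
}
```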
|
|
@ -433,9 +433,7 @@ func mustLoadRollupResultCacheKeyPrefix(path string) {
|
|||
func mustSaveRollupResultCacheKeyPrefix(path string) {
|
||||
path = path + ".key.prefix"
|
||||
data := encoding.MarshalUint64(nil, rollupResultCacheKeyPrefix)
|
||||
if err := fs.WriteFileAtomically(path, data, true); err != nil {
|
||||
logger.Fatalf("cannot store rollupResult cache key prefix to %q: %s", path, err)
|
||||
}
|
||||
fs.MustWriteAtomic(path, data, true)
|
||||
}
|
||||
|
||||
var tooBigRollupResults = metrics.NewCounter("vm_too_big_rollup_results_total")
|
||||
|
|
|
@@ -57,7 +57,7 @@ func GetTime(r *http.Request, argKey string, defaultMs int64) (int64, error) {
        return maxTimeMsecs, nil
    }
    // Parse argValue
    secs, err := parseTime(argValue)
    secs, err := promutils.ParseTime(argValue)
    if err != nil {
        return 0, fmt.Errorf("cannot parse %s=%s: %w", argKey, argValue, err)
    }

@@ -71,78 +71,6 @@ func GetTime(r *http.Request, argKey string, defaultMs int64) (int64, error) {
    return msecs, nil
}

func parseTime(s string) (float64, error) {
    if len(s) > 0 && (s[len(s)-1] != 'Z' && s[len(s)-1] > '9' || s[0] == '-') {
        // Parse duration relative to the current time
        d, err := promutils.ParseDuration(s)
        if err != nil {
            return 0, err
        }
        if d > 0 {
            d = -d
        }
        t := time.Now().Add(d)
        return float64(t.UnixNano()) / 1e9, nil
    }
    if len(s) == 4 {
        // Parse YYYY
        t, err := time.Parse("2006", s)
        if err != nil {
            return 0, err
        }
        return float64(t.UnixNano()) / 1e9, nil
    }
    if !strings.Contains(s, "-") {
        // Parse the timestamp in seconds
        return strconv.ParseFloat(s, 64)
    }
    if len(s) == 7 {
        // Parse YYYY-MM
        t, err := time.Parse("2006-01", s)
        if err != nil {
            return 0, err
        }
        return float64(t.UnixNano()) / 1e9, nil
    }
    if len(s) == 10 {
        // Parse YYYY-MM-DD
        t, err := time.Parse("2006-01-02", s)
        if err != nil {
            return 0, err
        }
        return float64(t.UnixNano()) / 1e9, nil
    }
    if len(s) == 13 {
        // Parse YYYY-MM-DDTHH
        t, err := time.Parse("2006-01-02T15", s)
        if err != nil {
            return 0, err
        }
        return float64(t.UnixNano()) / 1e9, nil
    }
    if len(s) == 16 {
        // Parse YYYY-MM-DDTHH:MM
        t, err := time.Parse("2006-01-02T15:04", s)
        if err != nil {
            return 0, err
        }
        return float64(t.UnixNano()) / 1e9, nil
    }
    if len(s) == 19 {
        // Parse YYYY-MM-DDTHH:MM:SS
        t, err := time.Parse("2006-01-02T15:04:05", s)
        if err != nil {
            return 0, err
        }
        return float64(t.UnixNano()) / 1e9, nil
    }
    t, err := time.Parse(time.RFC3339, s)
    if err != nil {
        return 0, err
    }
    return float64(t.UnixNano()) / 1e9, nil
}

var (
    // These constants were obtained from https://github.com/prometheus/prometheus/blob/91d7175eaac18b00e370965f3a8186cc40bf9f55/web/api/v1/api.go#L442
    // See https://github.com/prometheus/client_golang/issues/614 for details.
@@ -9,7 +9,6 @@ import (
    "path/filepath"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)

var (

@@ -76,18 +75,11 @@ func collectDashboardsSettings(path string) ([]byte, error) {
    if !fs.IsPathExist(path) {
        return nil, fmt.Errorf("cannot find folder %q", path)
    }
    files, err := os.ReadDir(path)
    if err != nil {
        return nil, fmt.Errorf("cannot read folder %q", path)
    }
    files := fs.MustReadDir(path)

    var dss []dashboardSettings
    for _, file := range files {
        filename := file.Name()
        if err != nil {
            logger.Errorf("skipping %q at -vmui.customDashboardsPath=%q, since the info for this file cannot be obtained: %s", filename, path, err)
            continue
        }
        if filepath.Ext(filename) != ".json" {
            continue
        }
@ -39,8 +39,10 @@ var (
|
|||
finalMergeDelay = flag.Duration("finalMergeDelay", 0, "The delay before starting final merge for per-month partition after no new data is ingested into it. "+
|
||||
"Final merge may require additional disk IO and CPU resources. Final merge may increase query speed and reduce disk space usage in some cases. "+
|
||||
"Zero value disables final merge")
|
||||
bigMergeConcurrency = flag.Int("bigMergeConcurrency", 0, "The maximum number of CPU cores to use for big merges. Default value is used if set to 0")
|
||||
smallMergeConcurrency = flag.Int("smallMergeConcurrency", 0, "The maximum number of CPU cores to use for small merges. Default value is used if set to 0")
|
||||
_ = flag.Int("bigMergeConcurrency", 0, "Deprecated: this flag does nothing. Please use -smallMergeConcurrency "+
|
||||
"for controlling the concurrency of background merges. See https://docs.victoriametrics.com/#storage")
|
||||
smallMergeConcurrency = flag.Int("smallMergeConcurrency", 0, "The maximum number of workers for background merges. See https://docs.victoriametrics.com/#storage . "+
|
||||
"It isn't recommended tuning this flag in general case, since this may lead to uncontrolled increase in the number of parts and increased CPU usage during queries")
|
||||
retentionTimezoneOffset = flag.Duration("retentionTimezoneOffset", 0, "The offset for performing indexdb rotation. "+
|
||||
"If set to 0, then the indexdb rotation is performed at 4am UTC time per each -retentionPeriod. "+
|
||||
"If set to 2h, then the indexdb rotation is performed at 4am EET time (the timezone with +2h offset)")
|
||||
|
@@ -93,7 +95,6 @@ func Init(resetCacheIfNeeded func(mrs []storage.MetricRow)) {
resetResponseCacheIfNeeded = resetCacheIfNeeded
storage.SetLogNewSeries(*logNewSeries)
storage.SetFinalMergeDelay(*finalMergeDelay)
storage.SetBigMergeWorkersCount(*bigMergeConcurrency)
storage.SetMergeWorkersCount(*smallMergeConcurrency)
storage.SetRetentionTimezoneOffset(*retentionTimezoneOffset)
storage.SetFreeDiskSpaceLimit(minFreeDiskSpaceBytes.N)
@@ -108,10 +109,7 @@ func Init(resetCacheIfNeeded func(mrs []storage.MetricRow)) {
logger.Infof("opening storage at %q with -retentionPeriod=%s", *DataPath, retentionPeriod)
startTime := time.Now()
WG = syncwg.WaitGroup{}
strg, err := storage.OpenStorage(*DataPath, retentionPeriod.Msecs, *maxHourlySeries, *maxDailySeries)
if err != nil {
logger.Fatalf("cannot open a storage at %s with -retentionPeriod=%s: %s", *DataPath, retentionPeriod, err)
}
strg := storage.MustOpenStorage(*DataPath, retentionPeriod.Msecs, *maxHourlySeries, *maxDailySeries)
Storage = strg
initStaleSnapshotsRemover(strg)
@@ -1,4 +1,4 @@
FROM golang:1.20.3 as build-web-stage
FROM golang:1.20.4 as build-web-stage
COPY build /build

WORKDIR /build
@@ -11,6 +11,8 @@ import ThemeProvider from "./components/Main/ThemeProvider/ThemeProvider";
import TracePage from "./pages/TracePage";
import ExploreMetrics from "./pages/ExploreMetrics";
import PreviewIcons from "./components/Main/Icons/PreviewIcons";
import WithTemplate from "./pages/WithTemplate";
import Relabel from "./pages/Relabel";

const App: FC = () => {
@@ -51,6 +53,14 @@ const App: FC = () => {
path={router.dashboards}
element={<DashboardsLayout/>}
/>
<Route
path={router.withTemplate}
element={<WithTemplate/>}
/>
<Route
path={router.relabel}
element={<Relabel/>}
/>
<Route
path={router.icons}
element={<PreviewIcons/>}
2
app/vmui/packages/vmui/src/api/expand-with-exprs.ts
Normal file
@@ -0,0 +1,2 @@
export const getExpandWithExprUrl = (server: string, query: string): string =>
`${server}/expand-with-exprs?query=${query}&format=json`;
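As a usage sketch, assuming a single-node server at http://localhost:8428: the helper only builds the URL and interpolates the query verbatim, so the caller should URI-encode it. The response shape { expr: string } is the one read by useExpandWithExprs below.

// Sketch: expanding a WITH-expression query via the new endpoint.
// "http://localhost:8428" is an assumed server address for illustration.
import { getExpandWithExprUrl } from "./expand-with-exprs";

async function expandQuery(query: string): Promise<string> {
  // getExpandWithExprUrl does not encode the query itself, so encode it here.
  const url = getExpandWithExprUrl("http://localhost:8428", encodeURIComponent(query));
  const resp = await (await fetch(url)).json();
  // On success the endpoint returns { expr: "<expanded query>" }.
  return resp?.expr || "";
}

expandQuery('WITH (f = {job="vm"}) rate(vm_http_requests_total{f})').then(console.log);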
8
app/vmui/packages/vmui/src/api/metric-relabel.ts
Normal file
@@ -0,0 +1,8 @@
export const getMetricRelabelDebug = (server: string, configs: string, metric: string): string => {
const params = [
"format=json",
`relabel_configs=${encodeURIComponent(configs)}`,
`metric=${encodeURIComponent(metric)}`
];
return `${server}/metric-relabel-debug?${params.join("&")}`;
};
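A usage sketch for this endpoint, assuming the same illustrative server address; the response fields (steps, originalLabels, resultingLabels) are the ones consumed by the Relabel page later in this diff.

// Sketch: debugging a relabeling config against one metric.
// The config and metric below are illustrative inputs.
import { getMetricRelabelDebug } from "./metric-relabel";

const configs = '- action: labeldrop\n  regex: "foo_.*"';
const metric = '{__name__="my_metric", foo_label="foo", bar_label="bar"}';

fetch(getMetricRelabelDebug("http://localhost:8428", configs, metric))
  .then(r => r.json())
  .then(resp => {
    // Each step carries the applied rule plus its input and output labels.
    for (const [i, step] of (resp.steps || []).entries()) {
      console.log(`step ${i + 1}: ${step.rule} -> ${step.outLabels}`);
    }
  });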
@@ -4,13 +4,13 @@ import { useTimeDispatch } from "../../../../state/time/TimeStateContext";
import { ArrowDownIcon, StorageIcon } from "../../../Main/Icons";
import Button from "../../../Main/Button/Button";
import "./style.scss";
import { replaceTenantId } from "../../../../utils/default-server-url";
import classNames from "classnames";
import Popper from "../../../Main/Popper/Popper";
import { getAppModeEnable } from "../../../../utils/app-mode";
import Tooltip from "../../../Main/Tooltip/Tooltip";
import useDeviceDetect from "../../../../hooks/useDeviceDetect";
import TextField from "../../../Main/TextField/TextField";
import { getTenantIdFromUrl, replaceTenantId } from "../../../../utils/tenants";

const TenantsConfiguration: FC<{accountIds: string[]}> = ({ accountIds }) => {
const appModeEnable = getAppModeEnable();
@@ -35,15 +35,7 @@ const TenantsConfiguration: FC<{accountIds: string[]}> = ({ accountIds }) => {
}
}, [search, accountIds]);

const getTenantIdFromUrl = (url: string) => {
const regexp = /(\/select\/)(\d+|\d.+)(\/)(.+)/;
return (url.match(regexp) || [])[2];
};

const showTenantSelector = useMemo(() => {
const id = true; //getTenantIdFromUrl(serverUrl);
return accountIds.length > 1 && id;
}, [accountIds, serverUrl]);
const showTenantSelector = useMemo(() => accountIds.length > 1, [accountIds]);

const toggleOpenOptions = () => {
setOpenOptions(prev => !prev);
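The inline helper removed above now lives in utils/tenants; assuming the regexp survived the move unchanged, its behaviour can be sketched as follows (URLs are illustrative):

// Sketch of getTenantIdFromUrl, assuming the extracted version kept
// the regexp from the inline helper deleted above.
const getTenantIdFromUrl = (url: string): string | undefined => {
  const regexp = /(\/select\/)(\d+|\d.+)(\/)(.+)/;
  // Capture group 2 holds the tenant id segment after /select/.
  return (url.match(regexp) || [])[2];
};

getTenantIdFromUrl("http://vmselect:8481/select/42/prometheus/api/v1/query"); // "42"
getTenantIdFromUrl("http://victoriametrics:8428/prometheus");                 // undefined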
@@ -2,10 +2,12 @@ import { useAppState } from "../../../../../state/common/StateContext";
import { useEffect, useMemo, useState } from "preact/compat";
import { ErrorTypes } from "../../../../../types";
import { getAccountIds } from "../../../../../api/accountId";
import { getAppModeParams } from "../../../../../utils/app-mode";
import { getAppModeEnable, getAppModeParams } from "../../../../../utils/app-mode";
import { getTenantIdFromUrl } from "../../../../../utils/tenants";

export const useFetchAccountIds = () => {
const { useTenantID } = getAppModeParams();
const appModeEnable = getAppModeEnable();
const { serverUrl } = useAppState();

const [isLoading, setIsLoading] = useState(false);
@@ -13,9 +15,11 @@ export const useFetchAccountIds = () => {
const [accountIds, setAccountIds] = useState<string[]>([]);

const fetchUrl = useMemo(() => getAccountIds(serverUrl), [serverUrl]);
const isServerUrlWithTenant = useMemo(() => !!getTenantIdFromUrl(serverUrl), [serverUrl]);
const preventFetch = appModeEnable ? !useTenantID : !isServerUrlWithTenant;

useEffect(() => {
if (!useTenantID) return;
if (preventFetch) return;
const fetchData = async () => {
setIsLoading(true);
try {
@@ -1,4 +1,4 @@
@use "../../../../styles/variables" as *;
@use "src/styles/variables" as *;

.vm-tenant-input {
position: relative;
@@ -1,4 +1,4 @@
import React, { FC, useMemo, useState } from "preact/compat";
import React, { FC, useEffect, useMemo, useState } from "preact/compat";
import { useFetchQuery } from "../../../hooks/useFetchQuery";
import { useGraphDispatch, useGraphState } from "../../../state/graph/GraphStateContext";
import GraphView from "../../Views/GraphView/GraphView";
@@ -10,6 +10,7 @@ import Button from "../../Main/Button/Button";
import "./style.scss";
import classNames from "classnames";
import useDeviceDetect from "../../../hooks/useDeviceDetect";
import { getDurationFromMilliseconds, getSecondsFromDuration, getStepFromDuration } from "../../../utils/time";

interface ExploreMetricItemGraphProps {
name: string,
@@ -26,15 +27,20 @@ const ExploreMetricItem: FC<ExploreMetricItemGraphProps> = ({
instance,
rateEnabled,
isBucket,
height
height,
}) => {
const { isMobile } = useDeviceDetect();
const { customStep, yaxis } = useGraphState();
const { period } = useTimeState();

const graphDispatch = useGraphDispatch();
const timeDispatch = useTimeDispatch();

const defaultStep = getStepFromDuration(period.end - period.start);
const stepSeconds = getSecondsFromDuration(customStep);
const heatmapStep = getDurationFromMilliseconds(stepSeconds * 10 * 1000);
const [isHeatmap, setIsHeatmap] = useState(false);
const step = isHeatmap && customStep === defaultStep ? heatmapStep : customStep;

const [showAllSeries, setShowAllSeries] = useState(false);

const query = useMemo(() => {
@@ -49,22 +55,7 @@ const ExploreMetricItem: FC<ExploreMetricItemGraphProps> = ({

const base = `{${params.join(",")}}`;
if (isBucket) {
if (instance) {
return `
label_map(
histogram_quantiles("__name__", 0.5, 0.95, 0.99, sum(rate(${base})) by (vmrange, le)),
"__name__",
"0.5", "q50",
"0.95", "q95",
"0.99", "q99",
)`;
}
return `
with (q = histogram_quantile(0.95, sum(rate(${base})) by (instance, vmrange, le))) (
alias(min(q), "q95min"),
alias(max(q), "q95max"),
alias(avg(q), "q95avg"),
)`;
return `sum(rate(${base})) by (vmrange, le)`;
}
const queryBase = rateEnabled ? `rollup_rate(${base})` : `rollup(${base})`;
return `
@@ -75,10 +66,10 @@ with (q = ${queryBase}) (
)`;
}, [name, job, instance, rateEnabled, isBucket]);

const { isLoading, graphData, error, warning } = useFetchQuery({
const { isLoading, graphData, error, warning, isHistogram } = useFetchQuery({
predefinedQuery: [query],
visible: true,
customStep,
customStep: step,
showAllSeries
});

@@ -94,6 +85,10 @@ with (q = ${queryBase}) (
setShowAllSeries(true);
};

useEffect(() => {
setIsHeatmap(isHistogram);
}, [isHistogram]);

return (
<div
className={classNames({
@@ -119,13 +114,14 @@ with (q = ${queryBase}) (
<GraphView
data={graphData}
period={period}
customStep={customStep}
customStep={step}
query={[query]}
yaxis={yaxis}
setYaxisLimits={setYaxisLimits}
setPeriod={setPeriod}
showLegend={false}
height={height}
isHistogram={isHistogram}
/>
)}
</div>
|
@ -45,8 +45,21 @@ const HeaderNav: FC<HeaderNavProps> = ({ color, background, direction }) => {
|
|||
]
|
||||
},
|
||||
{
|
||||
label: routerOptions[router.trace].title,
|
||||
value: router.trace,
|
||||
label: "Tools",
|
||||
submenu: [
|
||||
{
|
||||
label: routerOptions[router.trace].title,
|
||||
value: router.trace,
|
||||
},
|
||||
{
|
||||
label: routerOptions[router.withTemplate].title,
|
||||
value: router.withTemplate,
|
||||
},
|
||||
{
|
||||
label: routerOptions[router.relabel].title,
|
||||
value: router.relabel,
|
||||
},
|
||||
]
|
||||
},
|
||||
{
|
||||
label: routerOptions[router.dashboards].title,
|
||||
|
|
|
@@ -0,0 +1,45 @@
import React, { FC, useEffect } from "preact/compat";
import "./style.scss";
import { useState } from "react";
import Tooltip from "../Tooltip/Tooltip";
import Button from "../Button/Button";
import { CopyIcon } from "../Icons";

enum CopyState { copy = "Copy", copied = "Copied" }

const CodeExample: FC<{code: string}> = ({ code }) => {
const [tooltip, setTooltip] = useState(CopyState.copy);
const handlerCopy = () => {
navigator.clipboard.writeText(code);
setTooltip(CopyState.copied);
};

useEffect(() => {
let timeout: NodeJS.Timeout | null = null;
if (tooltip === CopyState.copied) {
timeout = setTimeout(() => setTooltip(CopyState.copy), 1000);
}

return () => {
timeout && clearTimeout(timeout);
};
}, [tooltip]);

return (
<code className="vm-code-example">
{code}
<div className="vm-code-example__copy">
<Tooltip title={tooltip}>
<Button
size="small"
variant="text"
onClick={handlerCopy}
startIcon={<CopyIcon/>}
/>
</Tooltip>
</div>
</code>
);
};

export default CodeExample;
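A minimal usage sketch for the new component; the snippet content and relative import path are illustrative:

// Sketch: rendering a copyable snippet with CodeExample.
import React, { FC } from "preact/compat";
import CodeExample from "../../components/Main/CodeExample/CodeExample";

const Demo: FC = () => (
  <CodeExample code={'sum(rate(vm_http_requests_total[5m])) by (path)'}/>
);

export default Demo;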
@@ -0,0 +1,17 @@
@use "src/styles/variables" as *;

.vm-code-example {
position: relative;
display: block;
padding: $padding-global;
white-space: pre-wrap;
border-radius: $border-radius-small;
background-color: rgba($color-black, 0.05);
overflow: auto;

&__copy {
position: absolute;
right: 10px;
top: 10px;
}
}
@@ -8,6 +8,7 @@ import "./style.scss";
import useDeviceDetect from "../../../../hooks/useDeviceDetect";
import classNames from "classnames";
import MonthsList from "./MonthsList/MonthsList";
import Button from "../../Button/Button";

interface DatePickerProps {
date: Date | Dayjs
@@ -29,6 +30,9 @@ const Calendar: FC<DatePickerProps> = ({
const [viewType, setViewType] = useState<CalendarTypeView>(CalendarTypeView.days);
const [viewDate, setViewDate] = useState(dayjs.tz(date));
const [selectDate, setSelectDate] = useState(dayjs.tz(date));

const today = dayjs().startOf("day").tz();
const viewDateIsToday = today.format() === viewDate.format();
const { isMobile } = useDeviceDetect();

const toggleDisplayYears = () => {
@@ -44,6 +48,10 @@ const Calendar: FC<DatePickerProps> = ({
setSelectDate(date);
};

const handleToday = () => {
setViewDate(today);
};

useEffect(() => {
if (selectDate.format() === dayjs.tz(date).format()) return;
onChange(selectDate.format(format));
@@ -88,6 +96,17 @@ const Calendar: FC<DatePickerProps> = ({
onChangeViewDate={handleChangeViewDate}
/>
)}
{!viewDateIsToday && (viewType === CalendarTypeView.days) && (
<div className="vm-calendar-footer">
<Button
variant="text"
size="small"
onClick={handleToday}
>
show today
</Button>
</div>
)}
</div>
);
};
@@ -11,6 +11,7 @@ interface CalendarBodyProps {
const weekday = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"];

const CalendarBody: FC<CalendarBodyProps> = ({ viewDate, selectDate, onChangeSelectDate }) => {
const format = "YYYY-MM-DD";
const today = dayjs().tz().startOf("day");

const days: (Dayjs|null)[] = useMemo(() => {
@@ -45,10 +46,10 @@ const CalendarBody: FC<CalendarBodyProps> = ({ viewDate, selectDate, onChangeSel
"vm-calendar-body-cell": true,
"vm-calendar-body-cell_day": true,
"vm-calendar-body-cell_day_empty": !d,
"vm-calendar-body-cell_day_active": (d && d.toISOString()) === selectDate.startOf("day").toISOString(),
"vm-calendar-body-cell_day_today": (d && d.toISOString()) === today.toISOString()
"vm-calendar-body-cell_day_active": (d && d.format(format)) === selectDate.format(format),
"vm-calendar-body-cell_day_today": (d && d.format(format)) === today.format(format)
})}
key={d ? d.toISOString() : i}
key={d ? d.format(format) : i}
onClick={createHandlerSelectDate(d)}
>
{d && d.format("D")}
@@ -166,4 +166,10 @@
}
}
}

&-footer {
display: flex;
align-items: center;
justify-content: flex-end;
}
}
@@ -10,12 +10,14 @@ import "./style.scss";
export interface CardinalityTotalsProps {
totalSeries: number;
totalSeriesAll: number;
totalSeriesPrev: number;
totalLabelValuePairs: number;
seriesCountByMetricName: TopHeapEntry[];
}

const CardinalityTotals: FC<CardinalityTotalsProps> = ({
totalSeries,
totalSeriesPrev,
totalSeriesAll,
seriesCountByMetricName
}) => {
@@ -27,11 +29,14 @@ const CardinalityTotals: FC<CardinalityTotalsProps> = ({
const isMetric = /__name__/.test(match || "");

const progress = seriesCountByMetricName[0]?.value / totalSeriesAll * 100;
const diff = totalSeries - totalSeriesPrev;
const dynamic = Math.abs(diff) / totalSeriesPrev * 100;

const totals = [
{
title: "Total series",
value: totalSeries.toLocaleString("en-US"),
dynamic: !totalSeries || !totalSeriesPrev ? "" : `${dynamic.toFixed(2)}%`,
display: !focusLabel,
info: `The total number of active time series.
A time series is uniquely identified by its name plus a set of its labels.
@@ -57,20 +62,33 @@ const CardinalityTotals: FC<CardinalityTotalsProps> = ({
"vm-cardinality-totals_mobile": isMobile
})}
>
{totals.map(({ title, value, info }) => (
{totals.map(({ title, value, info, dynamic }) => (
<div
className="vm-cardinality-totals-card"
key={title}
>
<div className="vm-cardinality-totals-card-header">
<h4 className="vm-cardinality-totals-card__title">
{title}
{info && (
<Tooltip title={<p className="vm-cardinality-totals-card-header__tooltip">{info}</p>}>
<div className="vm-cardinality-totals-card-header__info-icon"><InfoIcon/></div>
<Tooltip title={<p className="vm-cardinality-totals-card__tooltip">{info}</p>}>
<div className="vm-cardinality-totals-card__info-icon"><InfoIcon/></div>
</Tooltip>
)}
<h4 className="vm-cardinality-totals-card-header__title">{title}</h4>
</div>
</h4>
<span className="vm-cardinality-totals-card__value">{value}</span>
{!!dynamic && (
<Tooltip title={`in relation to the previous day: ${totalSeriesPrev.toLocaleString("en-US")}`}>
<span
className={classNames({
"vm-dynamic-number": true,
"vm-dynamic-number_positive vm-dynamic-number_down": diff < 0,
"vm-dynamic-number_negative vm-dynamic-number_up": diff > 0,
})}
>
{dynamic}
</span>
</Tooltip>
)}
</div>
))}
</div>
@@ -5,56 +5,52 @@
flex-wrap: wrap;
align-content: flex-start;
justify-content: flex-start;
gap: $padding-global;
gap: $padding-medium;
flex-grow: 1;

&_mobile {
gap: $padding-small;
gap: $padding-global;
justify-content: center;
}

&-card {
display: flex;
display: grid;
grid-template-columns: auto 1fr;
align-items: center;
justify-content: center;
gap: 4px;
gap: $padding-small 4px;

&-header {
&__info-icon {
width: 12px;
display: flex;
align-items: center;
justify-content: center;
color: $color-primary;
}

&__title {
display: flex;
align-items: center;
justify-content: flex-start;
gap: 4px;
grid-column: 1/-1;
color: $color-text;
}

&__info-icon {
width: 12px;
display: flex;
align-items: center;
justify-content: center;
color: $color-primary;
}

&__title {
font-weight: bold;
color: $color-text;

&:after {
content: ':';
}
}

&__tooltip {
max-width: 280px;
white-space: normal;
padding: $padding-small;
line-height: 130%;
font-size: $font-size;
}
&__tooltip {
max-width: 280px;
white-space: normal;
padding: $padding-small;
line-height: $font-size;
font-size: $font-size;
}

&__value {
font-weight: bold;
color: $color-primary;
font-size: $font-size-medium;
font-size: $font-size-large;
line-height: $font-size;
text-align: center;
}
}
}
@@ -18,6 +18,7 @@ interface MetricsProperties {
tabs: string[];
chartContainer: MutableRef<HTMLDivElement> | undefined;
totalSeries: number,
totalSeriesPrev: number,
sectionTitle: string;
tip?: string;
tableHeaderCells: HeadCell[];

@@ -28,6 +29,7 @@ const MetricsContent: FC<MetricsProperties> = ({
tabs: tabsProps = [],
chartContainer,
totalSeries,
totalSeriesPrev,
onActionClick,
sectionTitle,
tip,

@@ -40,6 +42,7 @@ const MetricsContent: FC<MetricsProperties> = ({
<TableCells
row={row}
totalSeries={totalSeries}
totalSeriesPrev={totalSeriesPrev}
onActionClick={onActionClick}
/>
);
@@ -26,7 +26,7 @@ const EnhancedTable: FC<TableProps> = ({
const sortedData = stableSort(rows, getComparator(order, orderBy));

return (
<table className="vm-table">
<table className="vm-table vm-cardinality-panel-table">
<EnhancedTableHead
order={order}
orderBy={orderBy}
@@ -4,15 +4,27 @@ import LineProgress from "../../../../components/Main/LineProgress/LineProgress"
import { PlayCircleOutlineIcon } from "../../../../components/Main/Icons";
import Button from "../../../../components/Main/Button/Button";
import Tooltip from "../../../../components/Main/Tooltip/Tooltip";
import classNames from "classnames";

interface CardinalityTableCells {
row: Data,
totalSeries: number;
totalSeriesPrev: number;
onActionClick: (name: string) => void;
}

const TableCells: FC<CardinalityTableCells> = ({ row, totalSeries, onActionClick }) => {
const TableCells: FC<CardinalityTableCells> = ({
row,
totalSeries,
totalSeriesPrev,
onActionClick
}) => {
const progress = totalSeries > 0 ? row.value / totalSeries * 100 : -1;
const progressPrev = totalSeriesPrev > 0 ? row.valuePrev / totalSeriesPrev * 100 : -1;
const hasProgresses = [progress, progressPrev].some(p => p === -1);

const diffPercent = progress - progressPrev;
const relationPrevDay = hasProgresses ? "" : `${diffPercent.toFixed(2)}%`;

const handleActionClick = () => {
onActionClick(row.name);
@@ -35,13 +47,42 @@ const TableCells: FC<CardinalityTableCells> = ({ row, totalSeries, onActionClick
key={row.value}
>
{row.value}

{!!row.diff && (
<Tooltip title={`in relation to the previous day: ${row.valuePrev}`}>
<span
className={classNames({
"vm-dynamic-number": true,
"vm-dynamic-number_positive": row.diff < 0,
"vm-dynamic-number_negative": row.diff > 0,
})}
>
{row.diff > 0 ? "+" : ""}{row.diff}
</span>
</Tooltip>
)}
</td>
{progress > 0 && (
<td
className="vm-table-cell"
key={row.progressValue}
>
<LineProgress value={progress}/>
<div className="vm-cardinality-panel-table__progress">
<LineProgress value={progress}/>
{relationPrevDay && (
<Tooltip title={"in relation to the previous day"}>
<span
className={classNames({
"vm-dynamic-number": true,
"vm-dynamic-number_positive vm-dynamic-number_down": diffPercent < 0,
"vm-dynamic-number_negative vm-dynamic-number_up": diffPercent > 0,
})}
>
{relationPrevDay}
</span>
</Tooltip>
)}
</div>
</td>
)}
<td
@@ -12,7 +12,7 @@ export function EnhancedTableHead(props: EnhancedHeaderTableProps) {
};

return (
<thead className="vm-table-header">
<thead className="vm-table-header vm-cardinality-panel-table__header">
<tr className="vm-table__row vm-table__row_header">
{headerCells.map((headCell) => (
<th
@@ -28,6 +28,8 @@ export interface TableProps {
export interface Data {
name: string;
value: number;
diff: number;
valuePrev: number;
progressValue: number;
actions: string;
}
@@ -28,8 +28,9 @@ export default class AppConfigurator {
get defaultTSDBStatus(): TSDBStatus {
return {
totalSeries: 0,
totalLabelValuePairs: 0,
totalSeriesPrev: 0,
totalSeriesByAll: 0,
totalLabelValuePairs: 0,
seriesCountByMetricName: [],
seriesCountByLabelName: [],
seriesCountByFocusLabelValue: [],

@@ -142,11 +143,11 @@ export default class AppConfigurator {
};
}

totalSeries(keyName: string): number {
totalSeries(keyName: string, prev = false): number {
if (keyName === "labelValueCountByLabelName") {
return -1;
}
return this.tsdbStatus.totalSeries;
return prev ? this.tsdbStatus.totalSeriesPrev : this.tsdbStatus.totalSeries;
}
}
@@ -33,24 +33,51 @@ export const useFetchQuery = (): {
setIsLoading(true);
setTSDBStatus(appConfigurator.defaultTSDBStatus);

const defaultParams = { date: requestParams.date, topN: 0, match: "", focusLabel: "" } as CardinalityRequestsParams;
const url = getCardinalityInfo(serverUrl, requestParams);
const urlDefault = getCardinalityInfo(serverUrl, defaultParams);
const totalParams = {
date: requestParams.date,
topN: 0,
match: "",
focusLabel: ""
} as CardinalityRequestsParams;

const prevDayParams = {
...requestParams,
date: dayjs(requestParams.date).subtract(1, "day").tz().format(DATE_FORMAT),
} as CardinalityRequestsParams;

const urlBase = getCardinalityInfo(serverUrl, requestParams);
const urlPrev = getCardinalityInfo(serverUrl, prevDayParams);
const uslTotal = getCardinalityInfo(serverUrl, totalParams);
const urls = [urlBase, urlPrev, uslTotal];

try {
const response = await fetch(url);
const resp = await response.json();
const responseTotal = await fetch(urlDefault);
const respTotals = await responseTotal.json();
if (response.ok) {
const { data } = resp;
const { totalSeries } = respTotals.data;
const result = { ...data } as TSDBStatus;
result.totalSeriesByAll = totalSeries;
const responses = await Promise.all(urls.map(url => fetch(url)));
const [resp, respPrev, respTotals] = await Promise.all(responses.map(resp => resp.json()));
if (responses[0].ok) {
const { data: dataTotal } = respTotals;
const prevResult = { ...respPrev.data } as TSDBStatus;
const result = { ...resp.data } as TSDBStatus;
result.totalSeriesByAll = dataTotal?.totalSeries;
result.totalSeriesPrev = prevResult?.totalSeries;

const name = match?.replace(/[{}"]/g, "");
result.seriesCountByLabelValuePair = result.seriesCountByLabelValuePair.filter(s => s.name !== name);

Object.keys(result).forEach(k => {
const key = k as keyof TSDBStatus;
const entries = result[key];
const prevEntries = prevResult[key];

if (Array.isArray(entries) && Array.isArray(prevEntries)) {
entries.forEach((entry) => {
const valuePrev = prevEntries.find(prevEntry => prevEntry.name === entry.name)?.value;
entry.diff = valuePrev ? entry.value - valuePrev : 0;
entry.valuePrev = valuePrev || 0;
});
}
});

setTSDBStatus(result);
setIsLoading(false);
} else {
@@ -21,7 +21,7 @@ import {
const spinnerMessage = `Please wait while cardinality stats is calculated.
This may take some time if the db contains big number of time series.`;

const Index: FC = () => {
const CardinalityPanel: FC = () => {
const { isMobile } = useDeviceDetect();

const [searchParams, setSearchParams] = useSearchParams();
@@ -55,6 +55,7 @@ const Index: FC = () => {
{isLoading && <Spinner message={spinnerMessage}/>}
<CardinalityConfigurator
totalSeries={tsdbStatusData.totalSeries}
totalSeriesPrev={tsdbStatusData.totalSeriesPrev}
totalSeriesAll={tsdbStatusData.totalSeriesByAll}
totalLabelValuePairs={tsdbStatusData.totalLabelValuePairs}
seriesCountByMetricName={tsdbStatusData.seriesCountByMetricName}
@@ -80,6 +81,7 @@ const Index: FC = () => {
onActionClick={handleFilterClick(keyName)}
tabs={defaultState.tabs[keyName as keyof Tabs]}
chartContainer={defaultState.containerRefs[keyName as keyof Containers<HTMLDivElement>]}
totalSeriesPrev={appConfigurator.totalSeries(keyName, true)}
totalSeries={appConfigurator.totalSeries(keyName)}
tableHeaderCells={tablesHeaders[keyName]}
/>
@@ -88,4 +90,4 @@ const Index: FC = () => {
);
};

export default Index;
export default CardinalityPanel;
@@ -18,4 +18,25 @@
flex-grow: 1;
width: 100%;
}

&-table {
&__header {
th:first-child {
width: 60%;
}

th:not(:first-child) {
width: auto;
}
}

&__progress {
display: grid;
grid-template-columns: minmax(200px, 1fr) 70px;
align-items: center;
justify-content: flex-start;
gap: $padding-small;
}
}
}
@@ -4,6 +4,7 @@ export interface TSDBStatus {
totalSeries: number;
totalLabelValuePairs: number;
totalSeriesByAll: number,
totalSeriesPrev: number,
seriesCountByMetricName: TopHeapEntry[];
seriesCountByLabelName: TopHeapEntry[];
seriesCountByFocusLabelValue: TopHeapEntry[];

@@ -14,6 +15,8 @@ export interface TSDBStatus {
export interface TopHeapEntry {
name: string;
value: number;
diff: number;
valuePrev: number;
}

interface QueryUpdaterArgs {
@@ -44,7 +44,14 @@ const CustomPanel: FC = () => {

const { queryOptions } = useFetchQueryOptions();
const {
isLoading, liveData, graphData, error, queryErrors, warning, traces, isHistogram
isLoading,
liveData,
graphData,
error,
queryErrors,
warning,
traces,
isHistogram
} = useFetchQuery({
visible: true,
customStep,

@@ -97,7 +104,7 @@ const CustomPanel: FC = () => {

useEffect(() => {
graphDispatch({ type: "SET_IS_HISTOGRAM", payload: isHistogram });
}, [isHistogram]);
}, [graphData]);

return (
<div
@@ -0,0 +1,36 @@
import { useAppState } from "../../../state/common/StateContext";
import { useState } from "react";
import { ErrorTypes, RelabelData } from "../../../types";
import { getMetricRelabelDebug } from "../../../api/metric-relabel";

export const useRelabelDebug = () => {
const { serverUrl } = useAppState();

const [data, setData] = useState<RelabelData | null>(null);
const [loading, setLoading] = useState(false);
const [error, setError] = useState<ErrorTypes | string>();

const fetchData = async (config: string, metric: string) => {
const fetchUrl = getMetricRelabelDebug(serverUrl, config, metric);
setLoading(true);
try {
const response = await fetch(fetchUrl);
const resp = await response.json();

setData(resp.error ? null : resp);
setError(String(resp.error || ""));
} catch (e) {
if (e instanceof Error && e.name !== "AbortError") {
setError(`${e.name}: ${e.message}`);
}
}
setLoading(false);
};

return {
data,
error,
loading,
fetchData
};
};
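A minimal sketch of consuming the hook; the real page below wires it to text fields and URL search params, so the config/metric pair here is illustrative:

// Sketch: driving useRelabelDebug from a bare-bones component.
import React, { FC } from "preact/compat";
import { useRelabelDebug } from "./hooks/useRelabelDebug";

const RelabelDemo: FC = () => {
  const { data, loading, error, fetchData } = useRelabelDebug();

  // Illustrative inputs; fetchData hits /metric-relabel-debug.
  const run = () => fetchData('- action: labeldrop\n  regex: "foo_.*"', '{__name__="my_metric", foo_label="foo"}');

  if (loading) return <span>debugging…</span>;
  return (
    <div>
      <button onClick={run}>debug</button>
      {error && <span>{error}</span>}
      {data && <pre>{JSON.stringify(data.steps, null, 2)}</pre>}
    </div>
  );
};

export default RelabelDemo;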
181
app/vmui/packages/vmui/src/pages/Relabel/index.tsx
Normal file
@@ -0,0 +1,181 @@
import React, { FC, useEffect } from "preact/compat";
import "./style.scss";
import TextField from "../../components/Main/TextField/TextField";
import { useState } from "react";
import Button from "../../components/Main/Button/Button";
import { InfoIcon, PlayIcon, WikiIcon } from "../../components/Main/Icons";
import "./style.scss";
import { useRelabelDebug } from "./hooks/useRelabelDebug";
import Spinner from "../../components/Main/Spinner/Spinner";
import Alert from "../../components/Main/Alert/Alert";
import { useSearchParams } from "react-router-dom";

const example = {
config: `- if: '{bar_label=~"b.*"}'
  source_labels: [foo_label, bar_label]
  separator: "_"
  target_label: foobar
- action: labeldrop
  regex: "foo_.*"
- target_label: job
  replacement: "my-application-2"`,
labels: "{__name__=\"my_metric\", bar_label=\"bar\", foo_label=\"foo\", job=\"my-application\", instance=\"192.168.0.1\"}"
};

const Relabel: FC = () => {
const [searchParams, setSearchParams] = useSearchParams();

const { data, loading, error, fetchData } = useRelabelDebug();

const [config, setConfig] = useState("");
const [labels, setLabels] = useState("");

const handleChangeConfig = (val: string) => {
setConfig(val);
};

const handleChangeLabels = (val: string) => {
setLabels(val);
};

const handleRunQuery = () => {
fetchData(config, labels);
searchParams.set("config", config);
searchParams.set("labels", labels);
setSearchParams(searchParams);
};

const handleRunExample = () => {
const { config, labels } = example;
setConfig(config);
setLabels(labels);
fetchData(config, labels);
searchParams.set("config", config);
searchParams.set("labels", labels);
setSearchParams(searchParams);
};

useEffect(() => {
const queryConfig = searchParams.get("config") || "";
const queryLabels = searchParams.get("labels") || "";
if (queryLabels || queryConfig) {
fetchData(queryConfig, queryLabels);
setConfig(queryConfig);
setLabels(queryLabels);
}
}, []);

return (
<section className="vm-relabeling">
{loading && <Spinner/>}
<div className="vm-relabeling-header vm-block">
<div className="vm-relabeling-header__configs">
<TextField
type="textarea"
label="Relabel configs"
value={config}
autofocus
onChange={handleChangeConfig}
/>
</div>
<div className="vm-relabeling-header__labels">
<TextField
type="textarea"
label="Labels"
value={labels}
onChange={handleChangeLabels}
/>
</div>
<div className="vm-relabeling-header-bottom">
<a
className="vm-link vm-link_with-icon"
target="_blank"
href="https://docs.victoriametrics.com/relabeling.html"
rel="help noreferrer"
>
<InfoIcon/>
Relabeling cookbook
</a>
<a
className="vm-link vm-link_with-icon"
target="_blank"
href="https://docs.victoriametrics.com/vmagent.html#relabeling"
rel="help noreferrer"
>
<WikiIcon/>
Documentation
</a>
<Button
variant="text"
onClick={handleRunExample}
>
Try example
</Button>
<Button
variant="contained"
onClick={handleRunQuery}
startIcon={<PlayIcon/>}
>
Submit
</Button>
</div>
</div>

{error && <Alert variant="error">{error}</Alert>}

{data && (
<div className="vm-relabeling-steps vm-block">
{data.originalLabels && (
<div className="vm-relabeling-steps-item">
<div className="vm-relabeling-steps-item__row">
<span>Original labels:</span>
<code dangerouslySetInnerHTML={{ __html: data.originalLabels }}/>
</div>
</div>
)}

{data.steps.map((step, index) => (
<div
className="vm-relabeling-steps-item"
key={index}
>
<div className="vm-relabeling-steps-item__row">
<span>Step:</span>
{index + 1}
</div>
<div className="vm-relabeling-steps-item__row">
<span>Relabeling Rule:</span>
<code>
<pre>{step.rule}</pre>
</code>
</div>
<div className="vm-relabeling-steps-item__row">
<span>Input Labels:</span>
<code>
<pre dangerouslySetInnerHTML={{ __html: step.inLabels }}/>
</code>
</div>
<div className="vm-relabeling-steps-item__row">
<span>Output labels:</span>
<code>
<pre dangerouslySetInnerHTML={{ __html: step.outLabels }}/>
</code>
</div>
</div>
))}

{data.resultingLabels && (
<div className="vm-relabeling-steps-item">
<div className="vm-relabeling-steps-item__row">
<span>Resulting labels:</span>
<code dangerouslySetInnerHTML={{ __html: data.resultingLabels }}/>
</div>
</div>
)}
</div>
)}
</section>
);
};

export default Relabel;
74
app/vmui/packages/vmui/src/pages/Relabel/style.scss
Normal file
@@ -0,0 +1,74 @@
@use "src/styles/variables" as *;

.vm-relabeling {
display: grid;
gap: $padding-medium;

&-header {
display: grid;
gap: $padding-global;
align-items: flex-start;
width: 100%;

&__configs {
textarea {
min-height: 200px;
}
}

&__labels {
textarea {
min-height: 60px;
}
}

textarea {
overflow: auto;
width: 100%;
height: 100%;
}

&-bottom {
display: flex;
align-items: center;
justify-content: flex-end;
gap: $padding-global;

a {
color: $color-text-secondary;
}
}
}

&-steps {
display: grid;
gap: $padding-global;

&-item {
display: grid;
gap: $padding-global;
padding: 0 $padding-global $padding-global;
border-bottom: $border-divider;

&:last-child {
border-bottom: none;
padding-bottom: 0;
}

&__row {
display: grid;
grid-template-columns: 100px 1fr;

@media (max-width: 500px) {
grid-template-columns: 1fr;
gap: 4px;
}

pre {
white-space: pre-wrap;
}
}
}
}
}
@@ -19,9 +19,8 @@ import classNames from "classnames";

const exampleDuration = "30ms, 15s, 3d4h, 1y2w";

const Index: FC = () => {
const TopQueries: FC = () => {
const { isMobile } = useDeviceDetect();

const { data, error, loading } = useFetchTopQueries();
const { topN, maxLifetime } = useTopQueriesState();
const topQueriesDispatch = useTopQueriesDispatch();

@@ -180,4 +179,4 @@ const Index: FC = () => {
);
};

export default Index;
export default TopQueries;
@@ -0,0 +1,218 @@
import React, { FC } from "preact/compat";
import "./style.scss";
import CodeExample from "../../../components/Main/CodeExample/CodeExample";

const MetricsQL = () => (
<a
className="vm-link vm-link_colored"
href="https://docs.victoriametrics.com/MetricsQL.html"
target="_blank"
rel="help noreferrer"
>
MetricsQL
</a>
);

const NodeExporterFull = () => (
<a
className="vm-link vm-link_colored"
href="https://grafana.com/grafana/dashboards/1860-node-exporter-full/"
target="_blank"
rel="help noreferrer"
>
Node Exporter Full
</a>
);

const WithTemplateTutorial: FC = () => (
<section className="vm-with-template-tutorial">
<h2 className="vm-with-template-tutorial__title">
Tutorial for WITH expressions in <MetricsQL/>
</h2>
<div className="vm-with-template-tutorial-section">
<p className="vm-with-template-tutorial-section__text">Let's look at the following real query from <NodeExporterFull/> dashboard:</p>
<CodeExample
code= {`(
(
node_memory_MemTotal_bytes{instance=~"$node:$port", job=~"$job"}
-
node_memory_MemFree_bytes{instance=~"$node:$port", job=~"$job"}
)
/
node_memory_MemTotal_bytes{instance=~"$node:$port", job=~"$job"}
) * 100`}
/>
<p className="vm-with-template-tutorial-section__text">
It is clear the query calculates the percentage of used memory for the given $node, $port and $job.
Isn't it? :)
</p>
</div>
<div className="vm-with-template-tutorial-section">
<p className="vm-with-template-tutorial-section__text">
What's wrong with this query?
Copy-pasted label filters for distinct timeseries which makes it easy
to mistype these filters during modification. Let's simplify the query with WITH expressions:
</p>
<CodeExample
code={`WITH (
commonFilters = {instance=~"$node:$port",job=~"$job"}
)
(
node_memory_MemTotal_bytes{commonFilters}
-
node_memory_MemFree_bytes{commonFilters}
)
/
node_memory_MemTotal_bytes{commonFilters} * 100`}
/>
</div>
<div className="vm-with-template-tutorial-section">
<p className="vm-with-template-tutorial-section__text">
Now label filters are located in a single place instead of three distinct places.
The query mentions node_memory_MemTotal_bytes metric twice and {"{commonFilters}"} three times.
WITH expressions may improve this:
</p>
<CodeExample
code={`WITH (
my_resource_utilization(free, limit, filters) = (limit{filters} - free{filters}) / limit{filters} * 100
)
my_resource_utilization(
node_memory_MemFree_bytes,
node_memory_MemTotal_bytes,
{instance=~"$node:$port",job=~"$job"},
)`}
/>
<p className="vm-with-template-tutorial-section__text">
Now the template function my_resource_utilization() may be used
for monitoring arbitrary resources - memory, CPU, network, storage, you name it.
</p>
</div>
<div className="vm-with-template-tutorial-section">
<p className="vm-with-template-tutorial-section__text">
Let's take another nice query from <NodeExporterFull/> dashboard:
</p>
<CodeExample
code={`(
(
(
count(
count(node_cpu_seconds_total{instance=~"$node:$port",job=~"$job"}) by (cpu)
)
)
-
avg(
sum by (mode) (rate(node_cpu_seconds_total{mode='idle',instance=~"$node:$port",job=~"$job"}[5m]))
)
)
*
100
)
/
count(
count(node_cpu_seconds_total{instance=~"$node:$port",job=~"$job"}) by (cpu)
)`}
/>
<p className="vm-with-template-tutorial-section__text">
|
||||
Do you understand what does this mess do? Is it manageable? :)
|
||||
WITH expressions are happy to help in a few iterations.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div className="vm-with-template-tutorial-section">
|
||||
<p className="vm-with-template-tutorial-section__text">
|
||||
1. Extract common filters used in multiple places into a commonFilters variable:
|
||||
</p>
|
||||
<CodeExample
|
||||
code={`WITH (
|
||||
commonFilters = {instance=~"$node:$port",job=~"$job"}
|
||||
)
|
||||
(
|
||||
(
|
||||
(
|
||||
count(
|
||||
count(node_cpu_seconds_total{commonFilters}) by (cpu)
|
||||
)
|
||||
)
|
||||
-
|
||||
avg(
|
||||
sum by (mode) (rate(node_cpu_seconds_total{mode='idle',commonFilters}[5m]))
|
||||
)
|
||||
)
|
||||
*
|
||||
100
|
||||
)
|
||||
/
|
||||
count(
|
||||
count(node_cpu_seconds_total{commonFilters}) by (cpu)
|
||||
)`}
|
||||
/>
|
||||
</div>
|
||||
<div className="vm-with-template-tutorial-section">
|
||||
<p className="vm-with-template-tutorial-section__text">
|
||||
2. Extract "count(count(...) by (cpu))" into cpuCount variable:
|
||||
</p>
|
||||
<CodeExample
|
||||
code={`WITH (
|
||||
commonFilters = {instance=~"$node:$port",job=~"$job"},
|
||||
cpuCount = count(count(node_cpu_seconds_total{commonFilters}) by (cpu))
|
||||
)
|
||||
(
|
||||
(
|
||||
cpuCount
|
||||
-
|
||||
avg(
|
||||
sum by (mode) (rate(node_cpu_seconds_total{mode='idle',commonFilters}[5m]))
|
||||
)
|
||||
)
|
||||
*
|
||||
100
|
||||
) / cpuCount`}
|
||||
/>
|
||||
</div>
|
||||
<div className="vm-with-template-tutorial-section">
|
||||
<p className="vm-with-template-tutorial-section__text">
|
||||
3. Extract rate(...) part into cpuIdle variable,
|
||||
since it is clear now that this part calculates the number of idle CPUs:
|
||||
</p>
|
||||
<CodeExample
|
||||
code={`WITH (
|
||||
commonFilters = {instance=~"$node:$port",job=~"$job"},
|
||||
cpuCount = count(count(node_cpu_seconds_total{commonFilters}) by (cpu)),
|
||||
cpuIdle = sum(rate(node_cpu_seconds_total{mode='idle',commonFilters}[5m]))
|
||||
)
|
||||
((cpuCount - cpuIdle) * 100) / cpuCount`}
|
||||
/>
|
||||
</div>
|
||||
<div className="vm-with-template-tutorial-section">
|
||||
<p className="vm-with-template-tutorial-section__text">
|
||||
4. Put node_cpu_seconds_total{"{commonFilters}"} into its own varialbe with the name cpuSeconds:
|
||||
</p>
|
||||
<CodeExample
code={`WITH (
cpuSeconds = node_cpu_seconds_total{instance=~"$node:$port",job=~"$job"},
cpuCount = count(count(cpuSeconds) by (cpu)),
cpuIdle = sum(rate(cpuSeconds{mode='idle'}[5m]))
)
((cpuCount - cpuIdle) * 100) / cpuCount`}
/>
<p className="vm-with-template-tutorial-section__text">
|
||||
Now the query became more clear comparing to the initial query.
|
||||
</p>
|
||||
</div>
<div className="vm-with-template-tutorial-section">
<p className="vm-with-template-tutorial-section__text">
WITH expressions may be nested and may be put anywhere. Try expanding the following query:
</p>
<CodeExample
code= {`WITH (
f(a, b) = WITH (
f1(x) = b-x,
f2(x) = x+x
) f1(a)*f2(b)
) f(foo, with(x=bar) x)`}
/>
</div>
</section>
);

export default WithTemplateTutorial;
@@ -0,0 +1,22 @@
@use "src/styles/variables" as *;

.vm-with-template-tutorial {
display: grid;
gap: $padding-large;

&__title {
font-size: $font-size-large;
font-weight: bold;
}

&-section {
display: grid;
gap: $padding-global;

&__text {
font-size: $font-size-medium;
line-height: 130%;
max-width: 720px;
}
}
}
@@ -0,0 +1,37 @@
import { useAppState } from "../../../state/common/StateContext";
import { useState } from "react";
import { ErrorTypes } from "../../../types";
import { getExpandWithExprUrl } from "../../../api/expand-with-exprs";

export const useExpandWithExprs = () => {
const { serverUrl } = useAppState();

const [data, setData] = useState("");
const [loading, setLoading] = useState(false);
const [error, setError] = useState<ErrorTypes | string>();

const fetchData = async (query: string) => {
const fetchUrl = getExpandWithExprUrl(serverUrl, query);
setLoading(true);
try {
const response = await fetch(fetchUrl);

const resp = await response.json();

setData(resp?.expr || "");
setError(String(resp.error || ""));
} catch (e) {
if (e instanceof Error && e.name !== "AbortError") {
setError(`${e.name}: ${e.message}`);
}
}
setLoading(false);
};

return {
data,
error,
loading,
expand: fetchData
};
};
63
app/vmui/packages/vmui/src/pages/WithTemplate/index.tsx
Normal file
@@ -0,0 +1,63 @@
import React, { FC } from "preact/compat";
import "./style.scss";
import TextField from "../../components/Main/TextField/TextField";
import { useState } from "react";
import Button from "../../components/Main/Button/Button";
import { PlayIcon } from "../../components/Main/Icons";
import WithTemplateTutorial from "./WithTemplateTutorial/WithTemplateTutorial";
import { useExpandWithExprs } from "./hooks/useExpandWithExprs";
import Spinner from "../../components/Main/Spinner/Spinner";

const WithTemplate: FC = () => {
const { data, loading, error, expand } = useExpandWithExprs();
const [expr, setExpr] = useState("");

const handleChangeInput = (val: string) => {
setExpr(val);
};

const handleRunQuery = () => {
expand(expr);
};

return (
<section className="vm-with-template">
{loading && <Spinner />}

<div className="vm-with-template-body vm-block">
<div className="vm-with-template-body__expr">
<TextField
type="textarea"
label="MetricsQL query with optional WITH expressions"
value={expr}
error={error}
autofocus
onChange={handleChangeInput}
/>
</div>
<div className="vm-with-template-body__result">
<TextField
type="textarea"
label="MetricsQL query after expanding WITH expressions and applying other optimizations"
value={data}
disabled
/>
</div>
<div className="vm-with-template-body-top">
<Button
variant="contained"
onClick={handleRunQuery}
startIcon={<PlayIcon/>}
>
Expand
</Button>
</div>
</div>
<div className="vm-block">
<WithTemplateTutorial/>
</div>
</section>
);
};

export default WithTemplate;
Some files were not shown because too many files have changed in this diff.