Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2024-12-11 14:53:49 +00:00)

Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Commit: c13f1d99e0
287 changed files with 35115 additions and 1051 deletions
3	.github/workflows/main.yml (vendored)
@@ -45,16 +45,19 @@ jobs:
       GOOS=freebsd go build -mod=vendor ./app/vmalert
       GOOS=freebsd go build -mod=vendor ./app/vmbackup
       GOOS=freebsd go build -mod=vendor ./app/vmrestore
+      GOOS=freebsd go build -mod=vendor ./app/vmctl
       GOOS=openbsd go build -mod=vendor ./app/victoria-metrics
       GOOS=openbsd go build -mod=vendor ./app/vmagent
       GOOS=openbsd go build -mod=vendor ./app/vmalert
       GOOS=openbsd go build -mod=vendor ./app/vmbackup
       GOOS=openbsd go build -mod=vendor ./app/vmrestore
+      GOOS=openbsd go build -mod=vendor ./app/vmctl
       GOOS=darwin go build -mod=vendor ./app/victoria-metrics
       GOOS=darwin go build -mod=vendor ./app/vmagent
       GOOS=darwin go build -mod=vendor ./app/vmalert
       GOOS=darwin go build -mod=vendor ./app/vmbackup
       GOOS=darwin go build -mod=vendor ./app/vmrestore
+      GOOS=darwin go build -mod=vendor ./app/vmctl
       - name: Publish coverage
         uses: codecov/codecov-action@v1.0.6
         with:
22	Makefile
@@ -18,7 +18,8 @@ all: \
 	vmalert-prod \
 	vmauth-prod \
 	vmbackup-prod \
-	vmrestore-prod
+	vmrestore-prod \
+	vmctl-prod

 include app/*/Makefile
 include deployment/*/Makefile
@@ -32,7 +33,8 @@ publish: \
 	publish-vmalert \
 	publish-vmauth \
 	publish-vmbackup \
-	publish-vmrestore
+	publish-vmrestore \
+	publish-vmctl

 package: \
 	package-victoria-metrics \
@@ -40,21 +42,24 @@ package: \
 	package-vmalert \
 	package-vmauth \
 	package-vmbackup \
-	package-vmrestore
+	package-vmrestore \
+	package-vmctl

 vmutils: \
 	vmagent \
 	vmalert \
 	vmauth \
 	vmbackup \
-	vmrestore
+	vmrestore \
+	vmctl

 vmutils-arm64: \
 	vmagent-arm64 \
 	vmalert-arm64 \
 	vmauth-arm64 \
 	vmbackup-arm64 \
-	vmrestore-arm64
+	vmrestore-arm64 \
+	vmctl-arm64

 release-snap:
 	snapcraft
@@ -97,7 +102,8 @@ release-vmutils-generic: \
 	vmalert-$(GOARCH)-prod \
 	vmauth-$(GOARCH)-prod \
 	vmbackup-$(GOARCH)-prod \
-	vmrestore-$(GOARCH)-prod
+	vmrestore-$(GOARCH)-prod \
+	vmctl-$(GOARCH)-prod
 	cd bin && \
 	tar --transform="flags=r;s|-$(GOARCH)||" -czf vmutils-$(GOARCH)-$(PKG_TAG).tar.gz \
 		vmagent-$(GOARCH)-prod \
@@ -105,12 +111,14 @@ release-vmutils-generic: \
 		vmauth-$(GOARCH)-prod \
 		vmbackup-$(GOARCH)-prod \
 		vmrestore-$(GOARCH)-prod \
+		vmctl-$(GOARCH)-prod \
 	&& sha256sum vmutils-$(GOARCH)-$(PKG_TAG).tar.gz \
 		vmagent-$(GOARCH)-prod \
 		vmalert-$(GOARCH)-prod \
 		vmauth-$(GOARCH)-prod \
 		vmbackup-$(GOARCH)-prod \
 		vmrestore-$(GOARCH)-prod \
+		vmctl-$(GOARCH)-prod \
 		| sed s/-$(GOARCH)// > vmutils-$(GOARCH)-$(PKG_TAG)_checksums.txt

 pprof-cpu:
@@ -141,6 +149,7 @@ errcheck: install-errcheck
 	errcheck -exclude=errcheck_excludes.txt ./app/vmauth/...
 	errcheck -exclude=errcheck_excludes.txt ./app/vmbackup/...
 	errcheck -exclude=errcheck_excludes.txt ./app/vmrestore/...
+	errcheck -exclude=errcheck_excludes.txt ./app/vmctl/...

 install-errcheck:
 	which errcheck || go install github.com/kisielk/errcheck
@@ -204,4 +213,5 @@ docs-sync:
 	cp app/vmauth/README.md docs/vmauth.md
 	cp app/vmbackup/README.md docs/vmbackup.md
 	cp app/vmrestore/README.md docs/vmrestore.md
+	cp app/vmctl/README.md docs/vmctl.md
 	cp README.md docs/Single-server-VictoriaMetrics.md
22	README.md
@@ -154,6 +154,7 @@ Alphabetically sorted links to case studies:
 * [Tuning](#tuning)
 * [Monitoring](#monitoring)
 * [Troubleshooting](#troubleshooting)
+* [Data migration](#data-migration)
 * [Backfilling](#backfilling)
 * [Data updates](#data-updates)
 * [Replication](#replication)
@@ -547,10 +548,15 @@ These handlers can be queried from Prometheus-compatible clients such as Grafana

 ### Prometheus querying API enhancements

-Additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) VictoriaMetrics accepts relative times in `time`, `start` and `end` query args.
+VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg, which can be used for enforcing additional label filters for queries. For example,
+`/api/v1/query_range?extra_label=user_id=123&query=<query>` would automatically add `{user_id="123"}` label filter to the given `<query>`. This functionality can be used
+for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by an auth proxy sitting
+in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.
+
+VictoriaMetrics accepts relative times in `time`, `start` and `end` query args additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt).
+For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`.

-By default, VictoriaMetrics returns time series for the last 5 minutes from /api/v1/series, while the Prometheus API defaults to all time. Use `start` and `end` to select a different time range.
+By default, VictoriaMetrics returns time series for the last 5 minutes from `/api/v1/series`, while the Prometheus API defaults to all time. Use `start` and `end` to select a different time range.

 VictoriaMetrics accepts additional args for `/api/v1/labels` and `/api/v1/label/.../values` handlers.
 See [this feature request](https://github.com/prometheus/prometheus/issues/6178) for details:
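To make the auth-proxy pattern from the added paragraph concrete, here is a minimal sketch — not part of the commit, with a made-up tenant lookup — of a Go reverse proxy that injects `extra_label` before forwarding to VictoriaMetrics:

```go
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	// Address of the protected VictoriaMetrics instance (assumed value).
	vmURL, err := url.Parse("http://localhost:8428")
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(vmURL)
	orig := proxy.Director
	proxy.Director = func(r *http.Request) {
		orig(r)
		// Hypothetical tenant resolution; a real proxy would derive this
		// from authenticated credentials rather than a plain header.
		tenant := r.Header.Get("X-User-ID")
		q := r.URL.Query()
		// Enforce the label filter so the tenant only sees its own series.
		q.Set("extra_label", "user_id="+tenant)
		r.URL.RawQuery = q.Encode()
	}
	log.Fatal(http.ListenAndServe(":8427", proxy))
}
```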
@@ -1353,6 +1359,17 @@ See the example of alerting rules for VM components [here](https://github.com/Vi
 * VictoriaMetrics ignores `NaN` values during data ingestion.

+
+## Data migration
+
+Use [vmctl](https://victoriametrics.github.io/vmctl.html) for data migration. It supports the following data migration types:
+
+* From Prometheus to VictoriaMetrics
+* From InfluxDB to VictoriaMetrics
+* From VictoriaMetrics to VictoriaMetrics
+
+See [vmctl docs](https://victoriametrics.github.io/vmctl.html) for more details.
+

 ## Backfilling

 VictoriaMetrics accepts historical data in arbitrary order of time via [any supported ingestion method](#how-to-import-time-series-data).
@@ -1420,7 +1437,6 @@ The collected profiles may be analyzed with [go tool pprof](https://github.com/g

 * [Helm charts for single-node and cluster versions of VictoriaMetrics](https://github.com/VictoriaMetrics/helm-charts).
 * [Kubernetes operator for VictoriaMetrics](https://github.com/VictoriaMetrics/operator).
-* [vmctl tool for data migration to VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl).
 * [netdata](https://github.com/netdata/netdata) can push data into VictoriaMetrics via `Prometheus remote_write API`.
   See [these docs](https://github.com/netdata/netdata#integrations).
 * [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi) can use VictoriaMetrics as time series backend.
@@ -11,6 +11,6 @@
 "status":"success",
 "data":{"resultType":"matrix",
 "result":[
-{"metric":{"item":"y"},"values":[["{TIME_S-1m}","0.5"],["{TIME_S}","0.5"]]}
+{"metric":{"item":"y"},"values":[["{TIME_S-1m}","0.5"]]}
 ]}}
 }
@@ -13,6 +13,6 @@
 "data":{"resultType":"matrix",
 "result":[
 {"metric":{"__name__":"not_nan_as_missing_data","item":"x"},"values":[["{TIME_S-2m}","2"]]},
-{"metric":{"__name__":"not_nan_as_missing_data","item":"y"},"values":[["{TIME_S-2m}","4"],["{TIME_S-1m}","3"],["{TIME_S}","3"]]}
+{"metric":{"__name__":"not_nan_as_missing_data","item":"y"},"values":[["{TIME_S-2m}","4"],["{TIME_S-1m}","3"]]}
 ]}}
 }
@@ -7,7 +7,7 @@ or any other Prometheus-compatible storage system that supports the `remote_writ
 <img alt="vmagent" src="vmagent.png">


-### Motivation
+## Motivation

 While VictoriaMetrics provides an efficient solution to store and observe metrics, our users needed something fast
 and RAM friendly to scrape metrics from Prometheus-compatible exporters to VictoriaMetrics.
@@ -15,7 +15,7 @@ Also, we found that users’ infrastructure are snowflakes - no two are alike, a
 to `vmagent` (like the ability to push metrics instead of pulling them). We did our best and plan to do even more.


-### Features
+## Features

 * Can be used as drop-in replacement for Prometheus for scraping targets such as [node_exporter](https://github.com/prometheus/node_exporter).
   See [Quick Start](#quick-start) for details.
@@ -36,7 +36,7 @@ to `vmagent` (like the ability to push metrics instead of pulling them). We did
 * Uses lower amounts of RAM, CPU, disk IO and network bandwidth compared to Prometheus.


-### Quick Start
+## Quick Start

 Just download `vmutils-*` archive from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), unpack it
 and pass the following flags to `vmagent` binary in order to start scraping Prometheus targets:
@@ -63,7 +63,7 @@ Then send Influx data to `http://vmagent-host:8429`. See [these docs](https://vi
 Pass `-help` to `vmagent` in order to see the full list of supported command-line flags with their descriptions.


-### Configuration update
+## Configuration update

 `vmagent` should be restarted in order to update config options set via command-line args.
@@ -79,10 +79,10 @@ Pass `-help` to `vmagent` in order to see the full list of supported command-lin
 There is also `-promscrape.configCheckInterval` command-line option, which can be used for automatic reloading configs from updated `-promscrape.config` file.


-### Use cases
+## Use cases


-#### IoT and Edge monitoring
+### IoT and Edge monitoring

 `vmagent` can run and collect metrics in IoT and industrial networks with unreliable or scheduled connections to the remote storage.
 It buffers the collected data in local files until the connection to remote storage becomes available and then sends the buffered
@@ -93,14 +93,14 @@ The maximum buffer size can be limited with `-remoteWrite.maxDiskUsagePerURL`.
 See [the corresponding Makefile rules](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmagent/Makefile) for details.


-#### Drop-in replacement for Prometheus
+### Drop-in replacement for Prometheus

 If you use Prometheus only for scraping metrics from various targets and forwarding these metrics to remote storage,
 then `vmagent` can replace such Prometheus setup. Usually `vmagent` requires lower amounts of RAM, CPU and network bandwidth comparing to Prometheus for such a setup.
 See [these docs](#how-to-collect-metrics-in-prometheus-format) for details.


-#### Replication and high availability
+### Replication and high availability

 `vmagent` replicates the collected metrics among multiple remote storage instances configured via `-remoteWrite.url` args.
 If a single remote storage instance temporarily is out of service, then the collected data remains available in another remote storage instances.
@@ -108,14 +108,14 @@ If a single remote storage instance temporarily is out of service, then the coll
 Then it sends the buffered data to the remote storage in order to prevent data gaps in the remote storage.


-#### Relabeling and filtering
+### Relabeling and filtering

 `vmagent` can add, remove or update labels on the collected data before sending it to remote storage. Additionally,
 it can remove unwanted samples via Prometheus-like relabeling before sending the collected data to remote storage.
 See [these docs](#relabeling) for details.


-#### Splitting data streams among multiple systems
+### Splitting data streams among multiple systems

 `vmagent` supports splitting the collected data between multiple destinations with the help of `-remoteWrite.urlRelabelConfig`,
 which is applied independently for each configured `-remoteWrite.url` destination. For instance, it is possible to replicate or split
@@ -123,7 +123,7 @@ data among long-term remote storage, short-term remote storage and real-time ana
 Note that each destination can receive its own subset of the collected data thanks to per-destination relabeling via `-remoteWrite.urlRelabelConfig`.


-#### Prometheus remote_write proxy
+### Prometheus remote_write proxy

 `vmagent` may be used as a proxy for Prometheus data sent via Prometheus `remote_write` protocol. It can accept data via `remote_write` API
 at `/api/v1/write` endpoint, apply relabeling and filtering and then proxy it to another `remote_write` systems.
@@ -131,12 +131,12 @@ The `vmagent` can be configured to encrypt the incoming `remote_write` requests
 Additionally, Basic Auth can be enabled for the incoming `remote_write` requests with `-httpAuth.*` command-line flags.


-#### remote_write for clustered version
+### remote_write for clustered version

 Although `vmagent` can accept data in several supported protocols (OpenTSDB, Influx, Prometheus, Graphite) and scrape data from various targets, writes are always performed using the Prometheus remote_write protocol. Therefore, for the clustered version, the `-remoteWrite.url` command-line flag should be configured as `<schema>://<vminsert-host>:8480/insert/<customer-id>/prometheus/api/v1/write`.
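For example — host name and tenant ID below are illustrative — a `vmagent` writing to a cluster's `vminsert` for tenant `42` would be started with:

```console
/path/to/vmagent -promscrape.config=prometheus.yml \
  -remoteWrite.url=http://vminsert-host:8480/insert/42/prometheus/api/v1/write
```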

-### How to collect metrics in Prometheus format
+## How to collect metrics in Prometheus format

 Pass the path to `prometheus.yml` to `-promscrape.config` command-line flag. `vmagent` takes into account the following
 sections from [Prometheus config file](https://prometheus.io/docs/prometheus/latest/configuration/configuration/):
@@ -192,7 +192,7 @@ entries to 60s. Run `vmagent -help` in order to see default values for `-promscr
 The file pointed by `-promscrape.config` may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.


-### Adding labels to metrics
+## Adding labels to metrics

 Labels can be added to metrics via the following mechanisms:
@@ -200,7 +200,7 @@ Labels can be added to metrics via the following mechanisms:
 * Via `-remoteWrite.label` command-line flag. These labels are added to all the collected metrics before sending them to `-remoteWrite.url`.


-### Relabeling
+## Relabeling

 `vmagent` supports [Prometheus relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config).
 Additionally it provides the following extra actions:
@@ -227,7 +227,7 @@ Read more about relabeling in the following articles:
 * [relabel_configs vs metric_relabel_configs](https://www.robustperception.io/relabel_configs-vs-metric_relabel_configs)


-### Monitoring
+## Monitoring

 `vmagent` exports various metrics in Prometheus exposition format at `http://vmagent-host:8429/metrics` page. It is recommended setting up regular scraping of this page
 either via `vmagent` itself or via Prometheus, so the exported metrics could be analyzed later.
@@ -246,7 +246,7 @@ This information may be useful for debugging target relabeling.
 It may be useful for performing `vmagent` rolling update without scrape loss.


-### Troubleshooting
+## Troubleshooting

 * It is recommended [setting up the official Grafana dashboard](#monitoring) in order to monitor `vmagent` state.
@@ -322,24 +322,24 @@ It may be useful for performing `vmagent` rolling update without scrape loss.
 ```


-### How to build from sources
+## How to build from sources

 It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmagent` is located in `vmutils-*` archives there.


-#### Development build
+### Development build

 1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
 2. Run `make vmagent` from the root folder of the repository.
    It builds `vmagent` binary and puts it into the `bin` folder.

-#### Production build
+### Production build

 1. [Install docker](https://docs.docker.com/install/).
 2. Run `make vmagent-prod` from the root folder of the repository.
    It builds `vmagent-prod` binary and puts it into the `bin` folder.

-#### Building docker images
+### Building docker images

 Run `make package-vmagent`. It builds `victoriametrics/vmagent:<PKG_TAG>` docker image locally.
 `<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository.
@@ -352,24 +352,24 @@ by setting it via `<ROOT_IMAGE>` environment variable. For example, the followin
 ROOT_IMAGE=scratch make package-vmagent
 ```

-#### ARM build
+### ARM build

 ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://blog.cloudflare.com/arm-takes-wing/).

-#### Development ARM build
+### Development ARM build

 1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
 2. Run `make vmagent-arm` or `make vmagent-arm64` from the root folder of the repository.
    It builds `vmagent-arm` or `vmagent-arm64` binary respectively and puts it into the `bin` folder.

-#### Production ARM build
+### Production ARM build

 1. [Install docker](https://docs.docker.com/install/).
 2. Run `make vmagent-arm-prod` or `make vmagent-arm64-prod` from the root folder of the repository.
    It builds `vmagent-arm-prod` or `vmagent-arm64-prod` binary respectively and puts it into the `bin` folder.


-### Profiling
+## Profiling

 `vmagent` provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):
@@ -8,7 +8,6 @@ import (
 	"io/ioutil"
 	"net/http"
 	"net/url"
-	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -22,7 +21,7 @@ import (
 )

 var (
-	rateLimit = flagutil.NewArray("remoteWrite.rateLimit", "Optional rate limit in bytes per second for data sent to -remoteWrite.url. "+
+	rateLimit = flagutil.NewArrayInt("remoteWrite.rateLimit", "Optional rate limit in bytes per second for data sent to -remoteWrite.url. "+
 		"By default the rate limit is disabled. It can be useful for limiting load on remote storage when big amounts of buffered data "+
 		"is sent after temporary unavailability of the remote storage")
 	sendTimeout = flagutil.NewArrayDuration("remoteWrite.sendTimeout", "Timeout for sending a single block of data to -remoteWrite.url")
@@ -120,15 +119,9 @@ func newClient(argIdx int, remoteWriteURL, sanitizedURL string, fq *persistentqu
 		},
 		stopCh: make(chan struct{}),
 	}
-	if bytesPerSec := rateLimit.GetOptionalArg(argIdx); bytesPerSec != "" {
-		limit, err := strconv.ParseInt(bytesPerSec, 10, 64)
-		if err != nil {
-			logger.Fatalf("cannot parse -remoteWrite.rateLimit=%q for -remoteWrite.url=%q: %s", bytesPerSec, sanitizedURL, err)
-		}
-		if limit > 0 {
-			logger.Infof("applying %d bytes per second rate limit for -remoteWrite.url=%q", limit, sanitizedURL)
-			c.rl.perSecondLimit = limit
-		}
+	if bytesPerSec := rateLimit.GetOptionalArgOrDefault(argIdx, 0); bytesPerSec > 0 {
+		logger.Infof("applying %d bytes per second rate limit for -remoteWrite.url=%q", bytesPerSec, sanitizedURL)
+		c.rl.perSecondLimit = int64(bytesPerSec)
 	}
 	c.rl.limitReached = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remote_write_rate_limit_reached_total{url=%q}`, c.sanitizedURL))
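With the flag now parsed as an integer array, a rate limit can be supplied once per `-remoteWrite.url`. A hypothetical invocation (URLs and limits are illustrative):

```console
/path/to/vmagent \
  -remoteWrite.url=http://storage-1:8428/api/v1/write -remoteWrite.rateLimit=1000000 \
  -remoteWrite.url=http://storage-2:8428/api/v1/write -remoteWrite.rateLimit=0
```

The second URL keeps the default of `0`, i.e. no rate limiting.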
@@ -7,6 +7,7 @@ import (
 	"time"

 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue"
@@ -35,9 +36,11 @@ type pendingSeries struct {
 	periodicFlusherWG sync.WaitGroup
 }

-func newPendingSeries(pushBlock func(block []byte)) *pendingSeries {
+func newPendingSeries(pushBlock func(block []byte), significantFigures, roundDigits int) *pendingSeries {
 	var ps pendingSeries
 	ps.wr.pushBlock = pushBlock
+	ps.wr.significantFigures = significantFigures
+	ps.wr.roundDigits = roundDigits
 	ps.stopCh = make(chan struct{})
 	ps.periodicFlusherWG.Add(1)
 	go func() {
@@ -85,9 +88,17 @@ type writeRequest struct {
 	// Move lastFlushTime to the top of the struct in order to guarantee atomic access on 32-bit architectures.
 	lastFlushTime uint64

-	wr prompbmarshal.WriteRequest
+	// pushBlock is called when the write request is ready to be sent.
+	pushBlock func(block []byte)
+
+	// How many significant figures must be left before sending the writeRequest to pushBlock.
+	significantFigures int
+
+	// How many decimal digits after point must be left before sending the writeRequest to pushBlock.
+	roundDigits int
+
+	wr prompbmarshal.WriteRequest

 	tss []prompbmarshal.TimeSeries

 	labels []prompbmarshal.Label
@@ -96,6 +107,8 @@ type writeRequest struct {
 }

 func (wr *writeRequest) reset() {
+	// Do not reset pushBlock, significantFigures and roundDigits, since they are re-used.
+
 	wr.wr.Timeseries = nil

 	for i := range wr.tss {
@@ -114,11 +127,28 @@ func (wr *writeRequest) reset() {

 func (wr *writeRequest) flush() {
 	wr.wr.Timeseries = wr.tss
+	wr.adjustSampleValues()
 	atomic.StoreUint64(&wr.lastFlushTime, fasttime.UnixTimestamp())
 	pushWriteRequest(&wr.wr, wr.pushBlock)
 	wr.reset()
 }

+func (wr *writeRequest) adjustSampleValues() {
+	samples := wr.samples
+	if n := wr.significantFigures; n > 0 {
+		for i := range samples {
+			s := &samples[i]
+			s.Value = decimal.RoundToSignificantFigures(s.Value, n)
+		}
+	}
+	if n := wr.roundDigits; n < 100 {
+		for i := range samples {
+			s := &samples[i]
+			s.Value = decimal.RoundToDecimalDigits(s.Value, n)
+		}
+	}
+}
+
 func (wr *writeRequest) push(src []prompbmarshal.TimeSeries) {
 	tssDst := wr.tss
 	for i := range src {
|
|
@ -7,7 +7,6 @@ import (
|
|||
"sync/atomic"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
|
||||
|
@@ -31,9 +30,13 @@ var (
 		"for each -remoteWrite.url. When buffer size reaches the configured maximum, then old data is dropped when adding new data to the buffer. "+
 		"Buffered data is stored in ~500MB chunks, so the minimum practical value for this flag is 500000000. "+
 		"Disk usage is unlimited if the value is set to 0")
-	significantFigures = flag.Int("remoteWrite.significantFigures", 0, "The number of significant figures to leave in metric values before writing them to remote storage. "+
-		"See https://en.wikipedia.org/wiki/Significant_figures . Zero value saves all the significant figures. "+
-		"This option may be used for increasing on-disk compression level for the stored metrics")
+	significantFigures = flagutil.NewArrayInt("remoteWrite.significantFigures", "The number of significant figures to leave in metric values before writing them "+
+		"to remote storage. See https://en.wikipedia.org/wiki/Significant_figures . Zero value saves all the significant figures. "+
+		"This option may be used for improving data compression for the stored metrics. See also -remoteWrite.roundDigits")
+	roundDigits = flagutil.NewArrayInt("remoteWrite.roundDigits", "Round metric values to this number of decimal digits after the point before writing them to remote storage. "+
+		"Examples: -remoteWrite.roundDigits=2 would round 1.236 to 1.24, while -remoteWrite.roundDigits=-1 would round 126.78 to 130. "+
+		"By default digits rounding is disabled. Set it to 100 for disabling it for a particular remote storage. "+
+		"This option may be used for improving data compression for the stored metrics")
 )

 var rwctxs []*remoteWriteCtx
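The flag help above states the rounding semantics; the following standalone Go sketch illustrates them (the real logic lives in `lib/decimal`, whose edge-case handling may differ — this is only a model of the documented behavior):

```go
package main

import (
	"fmt"
	"math"
)

// roundToDecimalDigits rounds f to n decimal digits after the point.
// n may be negative: n=-1 rounds to tens, matching the flag help
// (126.78 -> 130).
func roundToDecimalDigits(f float64, n int) float64 {
	shift := math.Pow10(n)
	return math.Round(f*shift) / shift
}

// roundToSignificantFigures keeps n significant figures in f.
func roundToSignificantFigures(f float64, n int) float64 {
	if f == 0 {
		return 0
	}
	// The digit count before the decimal point determines the shift.
	exp := math.Ceil(math.Log10(math.Abs(f)))
	shift := math.Pow10(n - int(exp))
	return math.Round(f*shift) / shift
}

func main() {
	fmt.Println(roundToDecimalDigits(1.236, 2))       // 1.24
	fmt.Println(roundToDecimalDigits(126.78, -1))     // 130
	fmt.Println(roundToSignificantFigures(1.2345, 3)) // 1.23
}
```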
@@ -137,17 +140,6 @@ func Stop() {
 //
 // Note that wr may be modified by Push due to relabeling and rounding.
 func Push(wr *prompbmarshal.WriteRequest) {
-	if *significantFigures > 0 {
-		// Round values according to significantFigures
-		for i := range wr.Timeseries {
-			samples := wr.Timeseries[i].Samples
-			for j := range samples {
-				s := &samples[j]
-				s.Value = decimal.Round(s.Value, *significantFigures)
-			}
-		}
-	}
-
 	var rctx *relabelCtx
 	rcs := allRelabelConfigs.Load().(*relabelConfigs)
 	prcsGlobal := rcs.global
@@ -213,9 +205,11 @@ func newRemoteWriteCtx(argIdx int, remoteWriteURL string, maxInmemoryBlocks int,
 		return float64(fq.GetInmemoryQueueLen())
 	})
 	c := newClient(argIdx, remoteWriteURL, sanitizedURL, fq, *queues)
+	sf := significantFigures.GetOptionalArgOrDefault(argIdx, 0)
+	rd := roundDigits.GetOptionalArgOrDefault(argIdx, 100)
 	pss := make([]*pendingSeries, *queues)
 	for i := range pss {
-		pss[i] = newPendingSeries(fq.MustWriteBlock)
+		pss[i] = newPendingSeries(fq.MustWriteBlock, sf, rd)
 	}
 	return &remoteWriteCtx{
 		idx: argIdx,
@@ -12,6 +12,7 @@ rules against configured address.
   support;
 * Integration with [Alertmanager](https://github.com/prometheus/alertmanager);
 * Keeps the alerts [state on restarts](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmalert#alerts-state-on-restarts);
+* Graphite datasource can be used for alerting and recording rules. See [these docs](#graphite) for details.
 * Lightweight without extra dependencies.

 ### Limitations:
@@ -105,7 +106,13 @@ The syntax for alerting rule is following:
 # The name of the alert. Must be a valid metric name.
 alert: <string>

-# The MetricsQL expression to evaluate.
+# Optional type for the rule. Supported values: "graphite", "prometheus".
+# By default "prometheus" rule type is used.
+[ type: <string> ]
+
+# The expression to evaluate. The expression language depends on the type value.
+# By default MetricsQL expression is used. If type="graphite", then the expression
+# must contain valid Graphite expression.
 expr: <string>

 # Alerts are considered firing once they have been returned for this long.
@@ -128,7 +135,13 @@ The syntax for recording rules is following:
 # The name of the time series to output to. Must be a valid metric name.
 record: <string>

-# The MetricsQL expression to evaluate.
+# Optional type for the rule. Supported values: "graphite", "prometheus".
+# By default "prometheus" rule type is used.
+[ type: <string> ]
+
+# The expression to evaluate. The expression language depends on the type value.
+# By default MetricsQL expression is used. If type="graphite", then the expression
+# must contain valid Graphite expression.
 expr: <string>

 # Labels to add or overwrite before storing the result.
@@ -166,6 +179,13 @@ Used as alert source in AlertManager.
 * `http://<vmalert-addr>/-/reload` - hot configuration reload.


+### Graphite
+
+vmalert sends requests to `<-datasource.url>/render?format=json` during evaluation of alerting and recording rules
+if the corresponding rule contains `type: "graphite"` config option. It is expected that the `<-datasource.url>/render`
+implements [Graphite Render API](https://graphite.readthedocs.io/en/stable/render_api.html) for `format=json`.
+
+
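For instance, a Graphite-backed alerting group looks like the following (adapted from the `rules3-good.rules` testdata added in this same commit):

```yaml
groups:
  - name: TestGroup
    interval: 2s
    type: graphite
    rules:
      - alert: Conns
        expr: filterSeries(sumSeries(host.receiver.interface.cons),'last','>', 500)
        for: 3m
```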
|
||||
### Configuration
|
||||
|
||||
The shortlist of configuration flags is the following:
|
||||
|
|
|
@@ -19,6 +19,7 @@ import (

 // AlertingRule is basic alert entity
 type AlertingRule struct {
+	Type   datasource.Type
 	RuleID uint64
 	Name   string
 	Expr   string
@@ -50,6 +51,7 @@ type alertingRuleMetrics struct {

 func newAlertingRule(group *Group, cfg config.Rule) *AlertingRule {
 	ar := &AlertingRule{
+		Type:   cfg.Type,
 		RuleID: cfg.ID,
 		Name:   cfg.Alert,
 		Expr:   cfg.Expr,
@@ -120,7 +122,7 @@ func (ar *AlertingRule) ID() uint64 {
 // Exec executes AlertingRule expression via the given Querier.
 // Based on the Querier results AlertingRule maintains notifier.Alerts
 func (ar *AlertingRule) Exec(ctx context.Context, q datasource.Querier, series bool) ([]prompbmarshal.TimeSeries, error) {
-	qMetrics, err := q.Query(ctx, ar.Expr)
+	qMetrics, err := q.Query(ctx, ar.Expr, ar.Type)
 	ar.mu.Lock()
 	defer ar.mu.Unlock()
@@ -137,7 +139,7 @@ func (ar *AlertingRule) Exec(ctx context.Context, q datasource.Querier, series b
 		}
 	}

-	qFn := func(query string) ([]datasource.Metric, error) { return q.Query(ctx, query) }
+	qFn := func(query string) ([]datasource.Metric, error) { return q.Query(ctx, query, ar.Type) }
 	updated := make(map[uint64]struct{})
 	// update list of active alerts
 	for _, m := range qMetrics {
@@ -310,6 +312,7 @@ func (ar *AlertingRule) RuleAPI() APIAlertingRule {
 		// encode as strings to avoid rounding
 		ID:      fmt.Sprintf("%d", ar.ID()),
 		GroupID: fmt.Sprintf("%d", ar.GroupID),
+		Type:    ar.Type.String(),
 		Name:       ar.Name,
 		Expression: ar.Expr,
 		For:        ar.For.String(),
@@ -404,7 +407,7 @@ func (ar *AlertingRule) Restore(ctx context.Context, q datasource.Querier, lookb
 		return fmt.Errorf("querier is nil")
 	}

-	qFn := func(query string) ([]datasource.Metric, error) { return q.Query(ctx, query) }
+	qFn := func(query string) ([]datasource.Metric, error) { return q.Query(ctx, query, ar.Type) }

 	// account for external labels in filter
 	var labelsFilter string
@@ -417,7 +420,7 @@ func (ar *AlertingRule) Restore(ctx context.Context, q datasource.Querier, lookb
 	// remote write protocol which is used for state persistence in vmalert.
 	expr := fmt.Sprintf("last_over_time(%s{alertname=%q%s}[%ds])",
 		alertForStateMetricName, ar.Name, labelsFilter, int(lookback.Seconds()))
-	qMetrics, err := q.Query(ctx, expr)
+	qMetrics, err := q.Query(ctx, expr, ar.Type)
 	if err != nil {
 		return err
 	}
|
@ -10,6 +10,8 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
|
||||
|
@@ -21,6 +23,7 @@ import (
 // Group contains list of Rules grouped into
 // entity with one name and evaluation interval
 type Group struct {
+	Type     datasource.Type `yaml:"type,omitempty"`
 	File     string
 	Name     string        `yaml:"name"`
 	Interval time.Duration `yaml:"interval,omitempty"`
@@ -44,6 +47,19 @@ func (g *Group) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if err != nil {
 		return fmt.Errorf("failed to marshal group configuration for checksum: %w", err)
 	}
+	// change default value to prometheus datasource.
+	if g.Type.Get() == "" {
+		g.Type.Set(datasource.NewPrometheusType())
+	}
+	// update rules with empty type.
+	for i, r := range g.Rules {
+		if r.Type.Get() == "" {
+			r.Type.Set(g.Type)
+			r.ID = HashRule(r)
+			g.Rules[i] = r
+		}
+	}
+
 	h := md5.New()
 	h.Write(b)
 	g.Checksum = fmt.Sprintf("%x", h.Sum(nil))
@@ -58,6 +74,7 @@ func (g *Group) Validate(validateAnnotations, validateExpressions bool) error {
 	if len(g.Rules) == 0 {
 		return fmt.Errorf("group %q can't contain no rules", g.Name)
 	}
+
 	uniqueRules := map[uint64]struct{}{}
 	for _, r := range g.Rules {
 		ruleName := r.Record
@@ -72,7 +89,13 @@ func (g *Group) Validate(validateAnnotations, validateExpressions bool) error {
 			return fmt.Errorf("invalid rule %q.%q: %w", g.Name, ruleName, err)
 		}
 		if validateExpressions {
-			if _, err := metricsql.Parse(r.Expr); err != nil {
+			// it's needed only for tests,
+			// because correct types must be inherited after unmarshalling.
+			exprValidator := g.Type.ValidateExpr
+			if r.Type.Get() != "" {
+				exprValidator = r.Type.ValidateExpr
+			}
+			if err := exprValidator(r.Expr); err != nil {
 				return fmt.Errorf("invalid expression for rule %q.%q: %w", g.Name, ruleName, err)
 			}
 		}
@@ -92,6 +115,7 @@ func (g *Group) Validate(validateAnnotations, validateExpressions bool) error {
 // recording rule or alerting rule.
 type Rule struct {
 	ID     uint64
+	Type   datasource.Type `yaml:"type,omitempty"`
 	Record string          `yaml:"record,omitempty"`
 	Alert  string          `yaml:"alert,omitempty"`
 	Expr   string          `yaml:"expr"`
@@ -169,6 +193,7 @@ func HashRule(r Rule) uint64 {
 		h.Write([]byte("alerting"))
 		h.Write([]byte(r.Alert))
 	}
+	h.Write([]byte(r.Type.Get()))
 	kv := sortMap(r.Labels)
 	for _, i := range kv {
 		h.Write([]byte(i.key))
|
@ -7,6 +7,8 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
|
||||
|
@@ -53,6 +55,10 @@ func TestParseBad(t *testing.T) {
 			[]string{"testdata/dir/rules4-bad.rules"},
 			"either `record` or `alert` must be set",
 		},
+		{
+			[]string{"testdata/rules1-bad.rules"},
+			"bad graphite expr",
+		},
 	}
 	for _, tc := range testCases {
 		_, err := Parse(tc.path, true, true)
|
@ -215,6 +221,75 @@ func TestGroup_Validate(t *testing.T) {
|
|||
},
|
||||
expErr: "",
|
||||
},
|
||||
{
|
||||
group: &Group{Name: "test thanos",
|
||||
Type: datasource.NewRawType("thanos"),
|
||||
Rules: []Rule{
|
||||
{Alert: "alert", Expr: "up == 1", Labels: map[string]string{
|
||||
"description": "{{ value|query }}",
|
||||
}},
|
||||
},
|
||||
},
|
||||
validateExpressions: true,
|
||||
expErr: "unknown datasource type",
|
||||
},
|
||||
{
|
||||
group: &Group{Name: "test graphite",
|
||||
Type: datasource.NewGraphiteType(),
|
||||
Rules: []Rule{
|
||||
{Alert: "alert", Expr: "up == 1", Labels: map[string]string{
|
||||
"description": "some-description",
|
||||
}},
|
||||
},
|
||||
},
|
||||
validateExpressions: true,
|
||||
expErr: "",
|
||||
},
|
||||
{
|
||||
group: &Group{Name: "test prometheus",
|
||||
Type: datasource.NewPrometheusType(),
|
||||
Rules: []Rule{
|
||||
{Alert: "alert", Expr: "up == 1", Labels: map[string]string{
|
||||
"description": "{{ value|query }}",
|
||||
}},
|
||||
},
|
||||
},
|
||||
validateExpressions: true,
|
||||
expErr: "",
|
||||
},
|
||||
{
|
||||
group: &Group{
|
||||
Name: "test graphite inherit",
|
||||
Type: datasource.NewGraphiteType(),
|
||||
Rules: []Rule{
|
||||
{
|
||||
Expr: "sumSeries(time('foo.bar',10))",
|
||||
For: PromDuration{milliseconds: 10},
|
||||
},
|
||||
{
|
||||
Expr: "sum(up == 0 ) by (host)",
|
||||
Type: datasource.NewPrometheusType(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
group: &Group{
|
||||
Name: "test graphite prometheus bad expr",
|
||||
Type: datasource.NewGraphiteType(),
|
||||
Rules: []Rule{
|
||||
{
|
||||
Expr: "sum(up == 0 ) by (host)",
|
||||
For: PromDuration{milliseconds: 10},
|
||||
},
|
||||
{
|
||||
Expr: "sumSeries(time('foo.bar',10))",
|
||||
Type: datasource.NewPrometheusType(),
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "invalid rule",
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
err := tc.group.Validate(tc.validateAnnotations, tc.validateExpressions)
|
||||
|
|
13	app/vmalert/config/testdata/dir/rules-update0-good.rules (vendored, new file)
@@ -0,0 +1,13 @@
+groups:
+  - name: TestUpdateGroup
+    interval: 2s
+    concurrency: 2
+    type: prometheus
+    rules:
+      - alert: up
+        expr: up == 0
+        for: 30s
+      - alert: up graphite
+        expr: filterSeries(time('host.1',20),'>','0')
+        for: 30s
+        type: graphite
12	app/vmalert/config/testdata/dir/rules-update1-good.rules (vendored, new file)
|
@ -0,0 +1,12 @@
|
|||
groups:
|
||||
- name: TestUpdateGroup
|
||||
interval: 30s
|
||||
type: graphite
|
||||
rules:
|
||||
- alert: up
|
||||
expr: filterSeries(time('host.2',20),'>','0')
|
||||
for: 30s
|
||||
- alert: up graphite
|
||||
expr: filterSeries(time('host.1',20),'>','0')
|
||||
for: 30s
|
||||
type: graphite
|
12	app/vmalert/config/testdata/rules1-bad.rules (vendored, new file)
|
@ -0,0 +1,12 @@
|
|||
groups:
|
||||
- name: TestGraphiteBadGroup
|
||||
interval: 2s
|
||||
concurrency: 2
|
||||
type: graphite
|
||||
rules:
|
||||
- alert: Conns
|
||||
expr: filterSeries(sumSeries(host.receiver.interface.cons),'last','>', 500) by instance
|
||||
for: 3m
|
||||
annotations:
|
||||
summary: Too high connection number for {{$labels.instance}}
|
||||
description: "It is {{ $value }} connections for {{$labels.instance}}"
|
30	app/vmalert/config/testdata/rules3-good.rules (vendored, new file)
|
@ -0,0 +1,30 @@
|
|||
groups:
|
||||
- name: TestGroup
|
||||
interval: 2s
|
||||
concurrency: 2
|
||||
type: graphite
|
||||
rules:
|
||||
- alert: Conns
|
||||
expr: filterSeries(sumSeries(host.receiver.interface.cons),'last','>', 500)
|
||||
for: 3m
|
||||
annotations:
|
||||
summary: Too high connection number for {{$labels.instance}}
|
||||
description: "It is {{ $value }} connections for {{$labels.instance}}"
|
||||
- name: TestGroupPromMixed
|
||||
interval: 2s
|
||||
concurrency: 2
|
||||
type: prometheus
|
||||
rules:
|
||||
- alert: Conns
|
||||
expr: sum(vm_tcplistener_conns) by (instance) > 1
|
||||
for: 3m
|
||||
annotations:
|
||||
summary: Too high connection number for {{$labels.instance}}
|
||||
description: "It is {{ $value }} connections for {{$labels.instance}}"
|
||||
- alert: HostDown
|
||||
type: graphite
|
||||
expr: filterSeries(sumSeries(host.receiver.interface.up),'last','=', 0)
|
||||
for: 3m
|
||||
annotations:
|
||||
summary: Too high connection number for {{$labels.instance}}
|
||||
description: "It is {{ $value }} connections for {{$labels.instance}}"
|
|
@@ -1,12 +1,14 @@
 package datasource

-import "context"
+import (
+	"context"
+)

 // Querier interface wraps Query method which
 // executes given query and returns list of Metrics
 // as result
 type Querier interface {
-	Query(ctx context.Context, query string) ([]Metric, error)
+	Query(ctx context.Context, query string, engine Type) ([]Metric, error)
 }

 // Metric is the basic entity which should be return by datasource
89	app/vmalert/datasource/type.go (new file)
@@ -0,0 +1,89 @@
+package datasource
+
+import (
+	"fmt"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/graphiteql"
+	"github.com/VictoriaMetrics/metricsql"
+)
+
+const graphiteType = "graphite"
+const prometheusType = "prometheus"
+
+// Type represents data source type
+type Type struct {
+	name string
+}
+
+// NewPrometheusType returns prometheus datasource type
+func NewPrometheusType() Type {
+	return Type{name: prometheusType}
+}
+
+// NewGraphiteType returns graphite datasource type
+func NewGraphiteType() Type {
+	return Type{name: graphiteType}
+}
+
+// NewRawType returns datasource type from raw string
+// without validation.
+func NewRawType(d string) Type {
+	return Type{name: d}
+}
+
+// Get returns datasource type
+func (t *Type) Get() string {
+	return t.name
+}
+
+// Set changes datasource type
+func (t *Type) Set(d Type) {
+	t.name = d.name
+}
+
+// String implements String interface with default value.
+func (t Type) String() string {
+	if t.name == "" {
+		return prometheusType
+	}
+	return t.name
+}
+
+// ValidateExpr validates query expression with datasource ql.
+func (t *Type) ValidateExpr(expr string) error {
+	switch t.name {
+	case graphiteType:
+		if _, err := graphiteql.Parse(expr); err != nil {
+			return fmt.Errorf("bad graphite expr: %q, err: %w", expr, err)
+		}
+	case "", prometheusType:
+		if _, err := metricsql.Parse(expr); err != nil {
+			return fmt.Errorf("bad prometheus expr: %q, err: %w", expr, err)
+		}
+	default:
+		return fmt.Errorf("unknown datasource type=%q", t.name)
+	}
+	return nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (t *Type) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	switch s {
+	case "":
+		s = prometheusType
+	case graphiteType, prometheusType:
+	default:
+		return fmt.Errorf("unknown datasource type=%q, want %q or %q", s, prometheusType, graphiteType)
+	}
+	t.name = s
+	return nil
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (t Type) MarshalYAML() (interface{}, error) {
+	return t.name, nil
+}
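To make the YAML behavior of `Type` concrete, here is a small hypothetical usage sketch (not part of the commit; the `rule` struct below only mirrors how `config.Rule` embeds the type):

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
	"gopkg.in/yaml.v2"
)

// rule is a hypothetical struct mirroring config.Rule's use of Type.
type rule struct {
	Type datasource.Type `yaml:"type,omitempty"`
	Expr string          `yaml:"expr"`
}

func main() {
	var r rule
	// Type.UnmarshalYAML validates the value and rejects anything
	// other than "graphite" or "prometheus".
	raw := []byte("type: graphite\nexpr: sumSeries(host.receiver.interface.cons)")
	if err := yaml.Unmarshal(raw, &r); err != nil {
		panic(err)
	}
	fmt.Println(r.Type.Get())                // graphite
	fmt.Println(r.Type.ValidateExpr(r.Expr)) // <nil> for a valid Graphite expression
}
```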
@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"io/ioutil"
 	"net/http"
-	"net/url"
 	"strconv"
 	"strings"
 	"time"
@@ -46,17 +45,45 @@ func (r response) metrics() ([]Metric, error) {
 	return ms, nil
 }

+type graphiteResponse []graphiteResponseTarget
+
+type graphiteResponseTarget struct {
+	Target     string            `json:"target"`
+	Tags       map[string]string `json:"tags"`
+	DataPoints [][2]float64      `json:"datapoints"`
+}
+
+func (r graphiteResponse) metrics() []Metric {
+	var ms []Metric
+	for _, res := range r {
+		if len(res.DataPoints) < 1 {
+			continue
+		}
+		var m Metric
+		// add only last value to the result.
+		last := res.DataPoints[len(res.DataPoints)-1]
+		m.Value = last[0]
+		m.Timestamp = int64(last[1])
+		for k, v := range res.Tags {
+			m.AddLabel(k, v)
+		}
+		ms = append(ms, m)
+	}
+	return ms
+}
+
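For reference, the Graphite `/render?format=json` payload this type decodes looks like the fixture used in `vm_test.go` later in this diff; each `datapoints` entry is a `[value, timestamp]` pair, and only the last pair is kept:

```json
[
  {
    "target": "constantLine(10)",
    "tags": {"name": "constantLine(10)"},
    "datapoints": [[10, 1611758343], [10, 1611758373], [10, 1611758403]]
  }
]
```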
 // VMStorage represents vmstorage entity with ability to read and write metrics
 type VMStorage struct {
 	c             *http.Client
-	queryURL      string
+	datasourceURL string
 	basicAuthUser string
 	basicAuthPass string
 	lookBack      time.Duration
 	queryStep     time.Duration
 }

-const queryPath = "/api/v1/query?query="
+const queryPath = "/api/v1/query"
+const graphitePath = "/render"

 // NewVMStorage is a constructor for VMStorage
 func NewVMStorage(baseURL, basicAuthUser, basicAuthPass string, lookBack time.Duration, queryStep time.Duration, c *http.Client) *VMStorage {
@@ -64,26 +91,31 @@ func NewVMStorage(baseURL, basicAuthUser, basicAuthPass string, lookBack time.Du
 		c:             c,
 		basicAuthUser: basicAuthUser,
 		basicAuthPass: basicAuthPass,
-		queryURL:      strings.TrimSuffix(baseURL, "/") + queryPath,
+		datasourceURL: strings.TrimSuffix(baseURL, "/"),
 		lookBack:      lookBack,
 		queryStep:     queryStep,
 	}
 }

-// Query reads metrics from datasource by given query
-func (s *VMStorage) Query(ctx context.Context, query string) ([]Metric, error) {
-	const (
-		statusSuccess, statusError, rtVector = "success", "error", "vector"
-	)
-	q := s.queryURL + url.QueryEscape(query)
-	if s.lookBack > 0 {
-		lookBack := time.Now().Add(-s.lookBack)
-		q += fmt.Sprintf("&time=%d", lookBack.Unix())
+// Query reads metrics from datasource by given query and type
+func (s *VMStorage) Query(ctx context.Context, query string, dataSourceType Type) ([]Metric, error) {
+	switch dataSourceType.name {
+	case "", prometheusType:
+		return s.queryDataSource(ctx, query, s.setPrometheusReqParams, parsePrometheusResponse)
+	case graphiteType:
+		return s.queryDataSource(ctx, query, s.setGraphiteReqParams, parseGraphiteResponse)
+	default:
+		return nil, fmt.Errorf("engine not found: %q", dataSourceType)
 	}
-	if s.queryStep > 0 {
-		q += fmt.Sprintf("&step=%s", s.queryStep.String())
-	}
-	req, err := http.NewRequest("POST", q, nil)
+}
+
+func (s *VMStorage) queryDataSource(
+	ctx context.Context,
+	query string,
+	setReqParams func(r *http.Request, query string),
+	processResponse func(r *http.Request, resp *http.Response) ([]Metric, error)) ([]Metric, error) {
+	req, err := http.NewRequest("POST", s.datasourceURL, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -91,6 +123,7 @@ func (s *VMStorage) Query(ctx context.Context, query string) ([]Metric, error) {
 	if s.basicAuthPass != "" {
 		req.SetBasicAuth(s.basicAuthUser, s.basicAuthPass)
 	}
+	setReqParams(req, query)
 	resp, err := s.c.Do(req.WithContext(ctx))
 	if err != nil {
 		return nil, fmt.Errorf("error getting response from %s: %w", req.URL, err)
@@ -100,9 +133,46 @@ func (s *VMStorage) Query(ctx context.Context, query string) ([]Metric, error) {
 		body, _ := ioutil.ReadAll(resp.Body)
 		return nil, fmt.Errorf("datasource returns unexpected response code %d for %s. Response body %s", resp.StatusCode, req.URL, body)
 	}
+	return processResponse(req, resp)
+}
+
+func (s *VMStorage) setPrometheusReqParams(r *http.Request, query string) {
+	r.URL.Path += queryPath
+	q := r.URL.Query()
+	q.Set("query", query)
+	if s.lookBack > 0 {
+		lookBack := time.Now().Add(-s.lookBack)
+		q.Set("time", fmt.Sprintf("%d", lookBack.Unix()))
+	}
+	if s.queryStep > 0 {
+		q.Set("step", s.queryStep.String())
+	}
+	r.URL.RawQuery = q.Encode()
+}
+
+func (s *VMStorage) setGraphiteReqParams(r *http.Request, query string) {
+	r.URL.Path += graphitePath
+	q := r.URL.Query()
+	q.Set("format", "json")
+	q.Set("target", query)
+	from := "-5min"
+	if s.lookBack > 0 {
+		lookBack := time.Now().Add(-s.lookBack)
+		from = strconv.FormatInt(lookBack.Unix(), 10)
+	}
+	q.Set("from", from)
+	q.Set("until", "now")
+	r.URL.RawQuery = q.Encode()
+}
+
+const (
+	statusSuccess, statusError, rtVector = "success", "error", "vector"
+)
+
+func parsePrometheusResponse(req *http.Request, resp *http.Response) ([]Metric, error) {
 	r := &response{}
 	if err := json.NewDecoder(resp.Body).Decode(r); err != nil {
-		return nil, fmt.Errorf("error parsing metrics for %s: %w", req.URL, err)
+		return nil, fmt.Errorf("error parsing prometheus metrics for %s: %w", req.URL, err)
 	}
 	if r.Status == statusError {
 		return nil, fmt.Errorf("response error, query: %s, errorType: %s, error: %s", req.URL, r.ErrorType, r.Error)
@@ -115,3 +185,11 @@ func (s *VMStorage) Query(ctx context.Context, query string) ([]Metric, error) {
 	}
 	return r.metrics()
 }
+
+func parseGraphiteResponse(req *http.Request, resp *http.Response) ([]Metric, error) {
+	r := &graphiteResponse{}
+	if err := json.NewDecoder(resp.Body).Decode(r); err != nil {
+		return nil, fmt.Errorf("error parsing graphite metrics for %s: %w", req.URL, err)
+	}
+	return r.metrics(), nil
+}
|
@ -14,6 +14,7 @@ var (
|
|||
basicAuthName = "foo"
|
||||
basicAuthPass = "bar"
|
||||
query = "vm_rows"
|
||||
queryRender = "constantLine(10)"
|
||||
)
|
||||
|
||||
func TestVMSelectQuery(t *testing.T) {
|
||||
|
@ -22,6 +23,13 @@ func TestVMSelectQuery(t *testing.T) {
|
|||
t.Errorf("should not be called")
|
||||
})
|
||||
c := -1
|
||||
mux.HandleFunc("/render", func(w http.ResponseWriter, request *http.Request) {
|
||||
c++
|
||||
switch c {
|
||||
case 7:
|
||||
w.Write([]byte(`[{"target":"constantLine(10)","tags":{"name":"constantLine(10)"},"datapoints":[[10,1611758343],[10,1611758373],[10,1611758403]]}]`))
|
||||
}
|
||||
})
|
||||
mux.HandleFunc("/api/v1/query", func(w http.ResponseWriter, r *http.Request) {
|
||||
c++
|
||||
if r.Method != http.MethodPost {
|
||||
|
@@ -62,25 +70,25 @@ func TestVMSelectQuery(t *testing.T) {
 	srv := httptest.NewServer(mux)
 	defer srv.Close()
 	am := NewVMStorage(srv.URL, basicAuthName, basicAuthPass, time.Minute, 0, srv.Client())
-	if _, err := am.Query(ctx, query); err == nil {
+	if _, err := am.Query(ctx, query, NewPrometheusType()); err == nil {
 		t.Fatalf("expected connection error got nil")
 	}
-	if _, err := am.Query(ctx, query); err == nil {
+	if _, err := am.Query(ctx, query, NewPrometheusType()); err == nil {
 		t.Fatalf("expected invalid response status error got nil")
 	}
-	if _, err := am.Query(ctx, query); err == nil {
+	if _, err := am.Query(ctx, query, NewPrometheusType()); err == nil {
 		t.Fatalf("expected response body error got nil")
 	}
-	if _, err := am.Query(ctx, query); err == nil {
+	if _, err := am.Query(ctx, query, NewPrometheusType()); err == nil {
 		t.Fatalf("expected error status got nil")
 	}
-	if _, err := am.Query(ctx, query); err == nil {
+	if _, err := am.Query(ctx, query, NewPrometheusType()); err == nil {
 		t.Fatalf("expected unknown status got nil")
 	}
-	if _, err := am.Query(ctx, query); err == nil {
+	if _, err := am.Query(ctx, query, NewPrometheusType()); err == nil {
 		t.Fatalf("expected non-vector resultType error got nil")
 	}
-	m, err := am.Query(ctx, query)
+	m, err := am.Query(ctx, query, NewPrometheusType())
 	if err != nil {
 		t.Fatalf("unexpected %s", err)
 	}
@@ -98,4 +106,22 @@ func TestVMSelectQuery(t *testing.T) {
 		m[0].Labels[0].Name != expected.Labels[0].Name {
 		t.Fatalf("unexpected metric %+v want %+v", m[0], expected)
 	}
+	m, err = am.Query(ctx, queryRender, NewGraphiteType())
+	if err != nil {
+		t.Fatalf("unexpected %s", err)
+	}
+	if len(m) != 1 {
+		t.Fatalf("expected 1 metric got %d in %+v", len(m), m)
+	}
+	expected = Metric{
+		Labels:    []Label{{Value: "constantLine(10)", Name: "name"}},
+		Timestamp: 1611758403,
+		Value:     10,
+	}
+	if m[0].Timestamp != expected.Timestamp &&
+		m[0].Value != expected.Value &&
+		m[0].Labels[0].Value != expected.Labels[0].Value &&
+		m[0].Labels[0].Name != expected.Labels[0].Name {
+		t.Fatalf("unexpected metric %+v want %+v", m[0], expected)
+	}
 }
|
@ -22,6 +22,7 @@ type Group struct {
|
|||
Name string
|
||||
File string
|
||||
Rules []Rule
|
||||
Type datasource.Type
|
||||
Interval time.Duration
|
||||
Concurrency int
|
||||
Checksum string
|
||||
|
@@ -50,6 +51,7 @@ func newGroupMetrics(name, file string) *groupMetrics {

 func newGroup(cfg config.Group, defaultInterval time.Duration, labels map[string]string) *Group {
 	g := &Group{
+		Type:     cfg.Type,
 		Name:     cfg.Name,
 		File:     cfg.File,
 		Interval: cfg.Interval,
@@ -99,6 +101,7 @@ func (g *Group) ID() uint64 {
 	hash.Write([]byte(g.File))
 	hash.Write([]byte("\xff"))
 	hash.Write([]byte(g.Name))
+	hash.Write([]byte(g.Type.Get()))
 	return hash.Sum64()
 }
@@ -157,6 +160,7 @@ func (g *Group) updateWith(newGroup *Group) error {
 	for _, nr := range rulesRegistry {
 		newRules = append(newRules, nr)
 	}
+	g.Type = newGroup.Type
 	g.Concurrency = newGroup.Concurrency
 	g.Checksum = newGroup.Checksum
 	g.Rules = newRules
|
@ -38,7 +38,7 @@ func (fq *fakeQuerier) add(metrics ...datasource.Metric) {
|
|||
fq.Unlock()
|
||||
}
|
||||
|
||||
func (fq *fakeQuerier) Query(_ context.Context, _ string) ([]datasource.Metric, error) {
|
||||
func (fq *fakeQuerier) Query(_ context.Context, _ string, _ datasource.Type) ([]datasource.Metric, error) {
|
||||
fq.Lock()
|
||||
defer fq.Unlock()
|
||||
if fq.err != nil {
|
||||
|
|
|
@ -142,6 +142,7 @@ func (g *Group) toAPI() APIGroup {
|
|||
// encode as string to avoid rounding
|
||||
ID: fmt.Sprintf("%d", g.ID()),
|
||||
Name: g.Name,
|
||||
Type: g.Type.String(),
|
||||
File: g.File,
|
||||
Interval: g.Interval.String(),
|
||||
Concurrency: g.Concurrency,
|
||||
|
|
|
@ -9,6 +9,8 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
|
||||
)
|
||||
|
||||
|
@@ -106,6 +108,18 @@ func TestManagerUpdate(t *testing.T) {
 			Name: "ExampleAlertAlwaysFiring",
 			Expr: "sum by(job) (up == 1)",
 		}
+		ExampleAlertGraphite = &AlertingRule{
+			Name: "up graphite",
+			Expr: "filterSeries(time('host.1',20),'>','0')",
+			Type: datasource.NewGraphiteType(),
+			For:  defaultEvalInterval,
+		}
+		ExampleAlertGraphite2 = &AlertingRule{
+			Name: "up",
+			Expr: "filterSeries(time('host.2',20),'>','0')",
+			Type: datasource.NewGraphiteType(),
+			For:  defaultEvalInterval,
+		}
 	)

 	testCases := []struct {
@@ -122,6 +136,7 @@ func TestManagerUpdate(t *testing.T) {
 				{
 					File: "config/testdata/dir/rules1-good.rules",
 					Name: "duplicatedGroupDiffFiles",
+					Type: datasource.NewPrometheusType(),
 					Interval: defaultEvalInterval,
 					Rules: []Rule{
 						&AlertingRule{
@ -146,12 +161,14 @@ func TestManagerUpdate(t *testing.T) {
|
|||
{
|
||||
File: "config/testdata/rules0-good.rules",
|
||||
Name: "groupGorSingleAlert",
|
||||
Type: datasource.NewPrometheusType(),
|
||||
Rules: []Rule{VMRows},
|
||||
Interval: defaultEvalInterval,
|
||||
},
|
||||
{
|
||||
File: "config/testdata/rules0-good.rules",
|
||||
Interval: defaultEvalInterval,
|
||||
Type: datasource.NewPrometheusType(),
|
||||
Name: "TestGroup", Rules: []Rule{
|
||||
Conns,
|
||||
ExampleAlertAlwaysFiring,
|
||||
|
@ -166,13 +183,16 @@ func TestManagerUpdate(t *testing.T) {
|
|||
{
|
||||
File: "config/testdata/rules0-good.rules",
|
||||
Name: "groupGorSingleAlert",
|
||||
Type: datasource.NewPrometheusType(),
|
||||
Interval: defaultEvalInterval,
|
||||
Rules: []Rule{VMRows},
|
||||
},
|
||||
{
|
||||
File: "config/testdata/rules0-good.rules",
|
||||
Interval: defaultEvalInterval,
|
||||
Name: "TestGroup", Rules: []Rule{
|
||||
Name: "TestGroup",
|
||||
Type: datasource.NewPrometheusType(),
|
||||
Rules: []Rule{
|
||||
Conns,
|
||||
ExampleAlertAlwaysFiring,
|
||||
}},
|
||||
|
@ -186,12 +206,14 @@ func TestManagerUpdate(t *testing.T) {
|
|||
{
|
||||
File: "config/testdata/rules0-good.rules",
|
||||
Name: "groupGorSingleAlert",
|
||||
Type: datasource.NewPrometheusType(),
|
||||
Interval: defaultEvalInterval,
|
||||
Rules: []Rule{VMRows},
|
||||
},
|
||||
{
|
||||
File: "config/testdata/rules0-good.rules",
|
||||
Interval: defaultEvalInterval,
|
||||
Type: datasource.NewPrometheusType(),
|
||||
Name: "TestGroup", Rules: []Rule{
|
||||
Conns,
|
||||
ExampleAlertAlwaysFiring,
|
||||
|
@ -199,6 +221,23 @@ func TestManagerUpdate(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "update prometheus to graphite type",
|
||||
initPath: "config/testdata/dir/rules-update0-good.rules",
|
||||
updatePath: "config/testdata/dir/rules-update1-good.rules",
|
||||
want: []*Group{
|
||||
{
|
||||
File: "config/testdata/dir/rules-update1-good.rules",
|
||||
Interval: defaultEvalInterval,
|
||||
Type: datasource.NewGraphiteType(),
|
||||
Name: "TestUpdateGroup",
|
||||
Rules: []Rule{
|
||||
ExampleAlertGraphite2,
|
||||
ExampleAlertGraphite,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
|
|
@ -18,6 +18,7 @@ import (
// to evaluate configured Expression and
// return TimeSeries as result.
type RecordingRule struct {
	Type   datasource.Type
	RuleID uint64
	Name   string
	Expr   string

@ -53,6 +54,7 @@ func (rr *RecordingRule) ID() uint64 {

func newRecordingRule(group *Group, cfg config.Rule) *RecordingRule {
	rr := &RecordingRule{
		Type:   cfg.Type,
		RuleID: cfg.ID,
		Name:   cfg.Record,
		Expr:   cfg.Expr,

@ -60,6 +62,7 @@ func newRecordingRule(group *Group, cfg config.Rule) *RecordingRule {
		GroupID: group.ID(),
		metrics: &recordingRuleMetrics{},
	}

	labels := fmt.Sprintf(`recording=%q, group=%q, id="%d"`, rr.Name, group.Name, rr.ID())
	rr.metrics.errors = getOrCreateGauge(fmt.Sprintf(`vmalert_recording_rules_error{%s}`, labels),
		func() float64 {

@ -84,8 +87,7 @@ func (rr *RecordingRule) Exec(ctx context.Context, q datasource.Querier, series
		return nil, nil
	}

	qMetrics, err := q.Query(ctx, rr.Expr)

	qMetrics, err := q.Query(ctx, rr.Expr, rr.Type)
	rr.mu.Lock()
	defer rr.mu.Unlock()

@ -162,6 +164,7 @@ func (rr *RecordingRule) RuleAPI() APIRecordingRule {
		ID:         fmt.Sprintf("%d", rr.ID()),
		GroupID:    fmt.Sprintf("%d", rr.GroupID),
		Name:       rr.Name,
		Type:       rr.Type.String(),
		Expression: rr.Expr,
		LastError:  lastErr,
		LastExec:   rr.lastExecTime,
@ -21,6 +21,7 @@ type APIAlert struct {
// APIGroup represents Group for WEB view
type APIGroup struct {
	Name        string `json:"name"`
	Type        string `json:"type"`
	ID          string `json:"id"`
	File        string `json:"file"`
	Interval    string `json:"interval"`

@ -33,6 +34,7 @@ type APIGroup struct {
type APIAlertingRule struct {
	ID         string `json:"id"`
	Name       string `json:"name"`
	Type       string `json:"type"`
	GroupID    string `json:"group_id"`
	Expression string `json:"expression"`
	For        string `json:"for"`

@ -46,6 +48,7 @@ type APIAlertingRule struct {
type APIRecordingRule struct {
	ID         string `json:"id"`
	Name       string `json:"name"`
	Type       string `json:"type"`
	GroupID    string `json:"group_id"`
	Expression string `json:"expression"`
	LastError  string `json:"last_error"`
@ -5,7 +5,7 @@ It reads username and password from [Basic Auth headers](https://en.wikipedia.or
matches them against configs pointed by `-auth.config` command-line flag and proxies incoming HTTP requests to the configured per-user `url_prefix` on successful match.

### Quick start
## Quick start

Just download `vmutils-*` archive from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), unpack it
and pass the following flag to `vmauth` binary in order to start authorizing and routing requests:

@ -26,7 +26,7 @@ Pass `-help` to `vmauth` in order to see all the supported command-line flags wi
Feel free [contacting us](mailto:info@victoriametrics.com) if you need customized auth proxy for VictoriaMetrics with the support of LDAP, SSO, RBAC, SAML, accounting, limits, etc.

### Auth config
## Auth config

Auth config is represented in the following simple `yml` format:

@ -68,7 +68,7 @@ The config may contain `%{ENV_VAR}` placeholders, which are substituted by the c
This may be useful for passing secrets to the config.

### Security
## Security

Do not transfer Basic Auth headers in plaintext over untrusted networks. Enable https. This can be done by passing the following `-tls*` command-line flags to `vmauth`:

@ -84,30 +84,30 @@ Do not transfer Basic Auth headers in plaintext over untrusted networks. Enable
Alternatively, [https termination proxy](https://en.wikipedia.org/wiki/TLS_termination_proxy) may be put in front of `vmauth`.

### Monitoring
## Monitoring

`vmauth` exports various metrics in Prometheus exposition format at `http://vmauth-host:8427/metrics` page. It is recommended setting up regular scraping of this page
either via [vmagent](https://victoriametrics.github.io/vmagent.html) or via Prometheus, so the exported metrics could be analyzed later.

### How to build from sources
## How to build from sources

It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmauth` is located in `vmutils-*` archives there.

#### Development build
### Development build

1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
2. Run `make vmauth` from the root folder of the repository.
   It builds `vmauth` binary and puts it into the `bin` folder.

#### Production build
### Production build

1. [Install docker](https://docs.docker.com/install/).
2. Run `make vmauth-prod` from the root folder of the repository.
   It builds `vmauth-prod` binary and puts it into the `bin` folder.

#### Building docker images
### Building docker images

Run `make package-vmauth`. It builds `victoriametrics/vmauth:<PKG_TAG>` docker image locally.
`<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository.

@ -121,7 +121,7 @@ ROOT_IMAGE=scratch make package-vmauth
```

### Profiling
## Profiling

`vmauth` provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):

@ -142,7 +142,7 @@ The command for collecting CPU profile waits for 30 seconds before returning.
The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).

### Advanced usage
## Advanced usage

Pass `-help` command-line arg to `vmauth` in order to see all the configuration options:
@ -23,9 +23,9 @@ See also [vmbackuper](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/
creation of hourly, daily, weekly and monthly backups.

### Use cases
## Use cases

#### Regular backups
### Regular backups

Regular backup can be performed with the following command:

@ -40,7 +40,7 @@ vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshotName=<local-
* `<path/to/new/backup>` is the destination path where the new backup will be placed.

#### Regular backups with server-side copy from existing backup
### Regular backups with server-side copy from existing backup

If the destination GCS bucket already contains the previous backup at `-origin` path, then the new backup can be sped up
with the following command:

@ -52,7 +52,7 @@ vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshotName=<local-
It saves time and network bandwidth costs by performing server-side copy for the shared data from the `-origin` to `-dst`.

#### Incremental backups
### Incremental backups

Incremental backups are performed if `-dst` points to an already existing backup. In this case only new data is uploaded to remote storage.
It saves time and network bandwidth costs when working with big backups:

@ -62,7 +62,7 @@ vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshotName=<local-
```

#### Smart backups
### Smart backups

Smart backups mean storing full daily backups into `YYYYMMDD` folders and creating incremental hourly backups in the `latest` folder:

@ -92,7 +92,7 @@ Do not forget removing old snapshots and backups when they are no longer needed
See also [vmbackuper tool](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/466) for automating smart backups.

### How does it work?
## How does it work?

The backup algorithm is the following:

@ -118,7 +118,7 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
`vmbackup` can work improperly or slowly when these properties are violated.

### Troubleshooting
## Troubleshooting

* If the backup is slow, then try setting a higher value for the `-concurrency` flag. This will increase the number of concurrent workers that upload data to backup storage.
* If `vmbackup` eats all the network bandwidth, then set `-maxBytesPerSecond` to the desired value.

@ -127,7 +127,7 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
at [cluster VictoriaMetrics](https://victoriametrics.github.io/Cluster-VictoriaMetrics.html) and vice versa.

### Advanced usage
## Advanced usage

* Obtaining credentials from a file.

@ -222,24 +222,24 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
```

### How to build from sources
## How to build from sources

It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - see `vmutils-*` archives there.

#### Development build
### Development build

1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
2. Run `make vmbackup` from the root folder of the repository.
   It builds `vmbackup` binary and puts it into the `bin` folder.

#### Production build
### Production build

1. [Install docker](https://docs.docker.com/install/).
2. Run `make vmbackup-prod` from the root folder of the repository.
   It builds `vmbackup-prod` binary and puts it into the `bin` folder.

#### Building docker images
### Building docker images

Run `make package-vmbackup`. It builds `victoriametrics/vmbackup:<PKG_TAG>` docker image locally.
`<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository.

73
app/vmctl/Makefile
Normal file
@ -0,0 +1,73 @@
# All these commands must run from repository root.

vmctl:
	APP_NAME=vmctl $(MAKE) app-local

vmctl-race:
	APP_NAME=vmctl RACE=-race $(MAKE) app-local

vmctl-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker

vmctl-pure-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-pure

vmctl-amd64-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-amd64

vmctl-arm-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-arm

vmctl-arm64-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-arm64

vmctl-ppc64le-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-ppc64le

vmctl-386-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-386

package-vmctl:
	APP_NAME=vmctl $(MAKE) package-via-docker

package-vmctl-pure:
	APP_NAME=vmctl $(MAKE) package-via-docker-pure

package-vmctl-amd64:
	APP_NAME=vmctl $(MAKE) package-via-docker-amd64

package-vmctl-arm:
	APP_NAME=vmctl $(MAKE) package-via-docker-arm

package-vmctl-arm64:
	APP_NAME=vmctl $(MAKE) package-via-docker-arm64

package-vmctl-ppc64le:
	APP_NAME=vmctl $(MAKE) package-via-docker-ppc64le

package-vmctl-386:
	APP_NAME=vmctl $(MAKE) package-via-docker-386

publish-vmctl:
	APP_NAME=vmctl $(MAKE) publish-via-docker

vmctl-amd64:
	CGO_ENABLED=1 GOARCH=amd64 $(MAKE) vmctl-local-with-goarch

vmctl-arm:
	CGO_ENABLED=0 GOARCH=arm $(MAKE) vmctl-local-with-goarch

vmctl-arm64:
	CGO_ENABLED=0 GOARCH=arm64 $(MAKE) vmctl-local-with-goarch

vmctl-ppc64le:
	CGO_ENABLED=0 GOARCH=ppc64le $(MAKE) vmctl-local-with-goarch

vmctl-386:
	CGO_ENABLED=0 GOARCH=386 $(MAKE) vmctl-local-with-goarch

vmctl-local-with-goarch:
	APP_NAME=vmctl $(MAKE) app-local-with-goarch

vmctl-pure:
	APP_NAME=vmctl $(MAKE) app-local-pure

473
app/vmctl/README.md
Normal file
@ -0,0 +1,473 @@
# vmctl - VictoriaMetrics command-line tool

Features:
- [x] Prometheus: migrate data from Prometheus to VictoriaMetrics using snapshot API
- [x] Thanos: migrate data from Thanos to VictoriaMetrics
- [ ] ~~Prometheus: migrate data from Prometheus to VictoriaMetrics by query~~ (discarded)
- [x] InfluxDB: migrate data from InfluxDB to VictoriaMetrics
- [ ] Storage Management: data re-balancing between nodes

# Table of contents

* [Articles](#articles)
* [How to build](#how-to-build)
* [Migrating data from InfluxDB 1.x](#migrating-data-from-influxdb-1x)
  * [Data mapping](#data-mapping)
  * [Configuration](#configuration)
  * [Filtering](#filtering)
* [Migrating data from InfluxDB 2.x](#migrating-data-from-influxdb-2x)
* [Migrating data from Prometheus](#migrating-data-from-prometheus)
  * [Data mapping](#data-mapping-1)
  * [Configuration](#configuration-1)
  * [Filtering](#filtering-1)
* [Migrating data from Thanos](#migrating-data-from-thanos)
  * [Current data](#current-data)
  * [Historical data](#historical-data)
* [Migrating data from VictoriaMetrics](#migrating-data-from-victoriametrics)
  * [Native protocol](#native-protocol)
* [Tuning](#tuning)
  * [Influx mode](#influx-mode)
  * [Prometheus mode](#prometheus-mode)
  * [VictoriaMetrics importer](#victoriametrics-importer)
  * [Importer stats](#importer-stats)
  * [Silent mode](#silent-mode)
  * [Significant figures](#significant-figures)
  * [Adding extra labels](#adding-extra-labels)

## Articles

* [How to migrate data from Prometheus](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-d44a6728f043)
* [How to migrate data from Prometheus. Filtering and modifying time series](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-filtering-and-modifying-time-series-6d40cea4bf21)

## How to build

It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmctl` is located in `vmutils-*` archives there.

### Development build

1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
2. Run `make vmctl` from the root folder of the repository.
   It builds `vmctl` binary and puts it into the `bin` folder.

### Production build

1. [Install docker](https://docs.docker.com/install/).
2. Run `make vmctl-prod` from the root folder of the repository.
   It builds `vmctl-prod` binary and puts it into the `bin` folder.

### Building docker images

Run `make package-vmctl`. It builds `victoriametrics/vmctl:<PKG_TAG>` docker image locally.
`<PKG_TAG>` is an auto-generated image tag, which depends on source code in the repository.
The `<PKG_TAG>` may be manually set via `PKG_TAG=foobar make package-vmctl`.

The base docker image is [alpine](https://hub.docker.com/_/alpine), but it is possible to use any other base image
by setting it via the `<ROOT_IMAGE>` environment variable. For example, the following command builds the image on top of the [scratch](https://hub.docker.com/_/scratch) image:

```bash
ROOT_IMAGE=scratch make package-vmctl
```

### ARM build

ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://blog.cloudflare.com/arm-takes-wing/).

#### Development ARM build

1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
2. Run `make vmctl-arm` or `make vmctl-arm64` from the root folder of the repository.
   It builds `vmctl-arm` or `vmctl-arm64` binary respectively and puts it into the `bin` folder.

#### Production ARM build

1. [Install docker](https://docs.docker.com/install/).
2. Run `make vmctl-arm-prod` or `make vmctl-arm64-prod` from the root folder of the repository.
   It builds `vmctl-arm-prod` or `vmctl-arm64-prod` binary respectively and puts it into the `bin` folder.

## Migrating data from InfluxDB (1.x)

`vmctl` supports the `influx` mode to migrate data from InfluxDB to VictoriaMetrics time-series database.

See `./vmctl influx --help` for details and the full list of flags.

To use the migration tool, please specify the InfluxDB address `--influx-addr`, the database `--influx-database` and the VictoriaMetrics address `--vm-addr`.
Flag `--vm-addr` for single-node VM is usually equal to `--httpListenAddr`, and for cluster version
is equal to the `--httpListenAddr` flag of the VMInsert component. Please note that vmctl performs an initial readiness check for the given address
by checking the `/health` endpoint. For cluster version it is additionally required to specify the `--vm-account-id` flag.
See more details for cluster version [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
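
The readiness check mentioned above amounts to a plain HTTP GET against `/health`. Here is a minimal, hypothetical Go sketch of such a probe (the address is an example and this is not vmctl's actual implementation):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// checkReadiness probes the /health endpoint the way the docs describe:
// anything other than HTTP 200 means the target is not ready for imports.
func checkReadiness(vmAddr string) error {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get(vmAddr + "/health")
	if err != nil {
		return fmt.Errorf("cannot reach %s: %s", vmAddr, err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}
	return nil
}

func main() {
	// Substitute your own --vm-addr value here.
	if err := checkReadiness("http://localhost:8428"); err != nil {
		panic(err)
	}
	fmt.Println("VictoriaMetrics is ready to accept imports")
}
```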

As soon as required flags are provided and all endpoints are accessible, `vmctl` will start the InfluxDB scheme exploration.
Basically, it just fetches all fields and timeseries from the provided database and builds up a registry of all available timeseries.
Then `vmctl` sends fetch requests for each timeseries to InfluxDB one by one and passes the results to the VM importer.
The VM importer then accumulates received samples in batches and sends import requests to VM.
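
To illustrate the accumulate-then-flush behavior described above, here is a small, self-contained Go sketch; the names (`sample`, `batchAndImport`) are illustrative and not part of vmctl:

```go
package main

import "fmt"

// sample is a simplified stand-in for one fetched datapoint.
type sample struct {
	ts    int64
	value float64
}

// batchAndImport drains fetched samples and flushes them via importFn
// whenever the batch reaches batchSize, mirroring how the importer
// accumulates samples before each import request.
func batchAndImport(in <-chan sample, batchSize int, importFn func([]sample)) {
	batch := make([]sample, 0, batchSize)
	for s := range in {
		batch = append(batch, s)
		if len(batch) == batchSize {
			importFn(batch)
			batch = make([]sample, 0, batchSize)
		}
	}
	if len(batch) > 0 { // flush the tail once the input is exhausted
		importFn(batch)
	}
}

func main() {
	in := make(chan sample)
	go func() {
		for i := 0; i < 5; i++ {
			in <- sample{ts: int64(i), value: float64(i)}
		}
		close(in)
	}()
	batchAndImport(in, 2, func(b []sample) {
		fmt.Println("import request with", len(b), "samples")
	})
}
```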

The importing process example for a local installation of InfluxDB (`http://localhost:8086`)
and single-node VictoriaMetrics (`http://localhost:8428`):
```
./vmctl influx --influx-database benchmark
InfluxDB import mode
2020/01/18 20:47:11 Exploring scheme for database "benchmark"
2020/01/18 20:47:11 fetching fields: command: "show field keys"; database: "benchmark"; retention: "autogen"
2020/01/18 20:47:11 found 10 fields
2020/01/18 20:47:11 fetching series: command: "show series "; database: "benchmark"; retention: "autogen"
Found 40000 timeseries to import. Continue? [Y/n] y
40000 / 40000 [-----------------------------------------------------------------------------------------------------------------------------------------------] 100.00% 21 p/s
2020/01/18 21:19:00 Import finished!
2020/01/18 21:19:00 VictoriaMetrics importer stats:
  idle duration: 13m51.461434876s;
  time spent while importing: 17m56.923899847s;
  total samples: 345600000;
  samples/s: 320914.04;
  total bytes: 5.9 GB;
  bytes/s: 5.4 MB;
  import requests: 40001;
2020/01/18 21:19:00 Total time: 31m48.467044016s
```

### Data mapping

`vmctl` maps Influx data the same way as VictoriaMetrics does by using the following rules:

* `influx-database` arg is mapped into `db` label value unless `db` tag exists in the Influx line.
* Field names are mapped to time series names prefixed with the {measurement}{separator} value,
where {separator} equals `_` by default.
It can be changed with the `--influx-measurement-field-separator` command-line flag.
* Field values are mapped to time series values.
* Tags are mapped to Prometheus labels format as-is.

For example, the following Influx line:
```
foo,tag1=value1,tag2=value2 field1=12,field2=40
```

is converted into the following Prometheus format data points:
```
foo_field1{tag1="value1", tag2="value2"} 12
foo_field2{tag1="value1", tag2="value2"} 40
```
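
The naming rule above can be expressed as a tiny Go helper. This is only an illustration of the rule, not a function from vmctl:

```go
package main

import "fmt"

// seriesName applies the rule from the list above:
// {measurement}{separator}{field}; a bare field name is used
// when no measurement is present.
func seriesName(measurement, separator, field string) string {
	if measurement == "" {
		return field
	}
	return measurement + separator + field
}

func main() {
	// The "foo" measurement with fields "field1" and "field2" from the example above.
	fmt.Println(seriesName("foo", "_", "field1")) // foo_field1
	fmt.Println(seriesName("foo", "_", "field2")) // foo_field2
}
```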

### Configuration

The configuration flags should contain self-explanatory descriptions.

### Filtering

The filtering consists of two parts: timeseries and time.
The first step of application is to select all available timeseries
for the given database and retention. The user may specify an additional filtering
condition via the `--influx-filter-series` flag. For example:
```
./vmctl influx --influx-database benchmark \
  --influx-filter-series "on benchmark from cpu where hostname='host_1703'"
InfluxDB import mode
2020/01/26 14:23:29 Exploring scheme for database "benchmark"
2020/01/26 14:23:29 fetching fields: command: "show field keys"; database: "benchmark"; retention: "autogen"
2020/01/26 14:23:29 found 12 fields
2020/01/26 14:23:29 fetching series: command: "show series on benchmark from cpu where hostname='host_1703'"; database: "benchmark"; retention: "autogen"
Found 10 timeseries to import. Continue? [Y/n]
```
The timeseries select query would be as follows:
`fetching series: command: "show series on benchmark from cpu where hostname='host_1703'"; database: "benchmark"; retention: "autogen"`

The second step of filtering is a time filter, and it applies when fetching the datapoints from Influx.
Time filtering may be configured with two flags:
* `--influx-filter-time-start`
* `--influx-filter-time-end`

Here's an example of importing timeseries for one day only:
`./vmctl influx --influx-database benchmark --influx-filter-series "where hostname='host_1703'" --influx-filter-time-start "2020-01-01T10:07:00Z" --influx-filter-time-end "2020-01-01T15:07:00Z"`

Please see more about time filtering [here](https://docs.influxdata.com/influxdb/v1.7/query_language/schema_exploration#filter-meta-queries-by-time).

## Migrating data from InfluxDB (2.x)

Migrating data from InfluxDB v2.x is not supported yet ([#32](https://github.com/VictoriaMetrics/vmctl/issues/32)).
You may find useful a 3rd party solution for this - https://github.com/jonppe/influx_to_victoriametrics.

## Migrating data from Prometheus

`vmctl` supports the `prometheus` mode for migrating data from Prometheus to VictoriaMetrics time-series database.
Migration is based on reading a Prometheus snapshot, which is basically a hard-link to Prometheus data files.

See `./vmctl prometheus --help` for details and the full list of flags.

To use the migration tool, please specify the path to the Prometheus snapshot `--prom-snapshot` and the VictoriaMetrics address `--vm-addr`.
More about Prometheus snapshots may be found [here](https://www.robustperception.io/taking-snapshots-of-prometheus-data).
Flag `--vm-addr` for single-node VM is usually equal to `--httpListenAddr`, and for cluster version
is equal to the `--httpListenAddr` flag of the VMInsert component. Please note that vmctl performs an initial readiness check for the given address
by checking the `/health` endpoint. For cluster version it is additionally required to specify the `--vm-account-id` flag.
See more details for cluster version [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).

As soon as required flags are provided and all endpoints are accessible, `vmctl` will start the Prometheus snapshot exploration.
Basically, it just fetches all available blocks in the provided snapshot and reads the metadata. It also does initial filtering by time
if flags `--prom-filter-time-start` or `--prom-filter-time-end` were set. The exploration procedure prints some stats from the read blocks.
Please note that the stats do not take timeseries or samples filtering into account. This will be done during the importing process.
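
For intuition, this block exploration boils down to reading each block's `meta.json`. A rough Go sketch, assuming the standard Prometheus TSDB block layout (the path is a placeholder; this is not vmctl's implementation):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"path/filepath"
)

// blockMeta mirrors the handful of meta.json fields needed for time filtering.
type blockMeta struct {
	MinTime int64 `json:"minTime"`
	MaxTime int64 `json:"maxTime"`
	Stats   struct {
		NumSamples uint64 `json:"numSamples"`
		NumSeries  uint64 `json:"numSeries"`
	} `json:"stats"`
}

func main() {
	snapshot := "/path/to/snapshot" // placeholder; point this at a real snapshot dir
	entries, err := ioutil.ReadDir(snapshot)
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		if !e.IsDir() {
			continue
		}
		raw, err := ioutil.ReadFile(filepath.Join(snapshot, e.Name(), "meta.json"))
		if err != nil {
			continue // not a TSDB block directory
		}
		var m blockMeta
		if err := json.Unmarshal(raw, &m); err != nil {
			continue
		}
		fmt.Printf("block %s: minTime=%d maxTime=%d series=%d samples=%d\n",
			e.Name(), m.MinTime, m.MaxTime, m.Stats.NumSeries, m.Stats.NumSamples)
	}
}
```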

The importing process takes the snapshot blocks revealed by the Explore procedure and processes them one by one,
accumulating timeseries and samples. Please note that `vmctl` relies on responses from Influx on this stage,
so ensure that Explore queries are executed without errors or limits. Please see this
[issue](https://github.com/VictoriaMetrics/vmctl/issues/30) for details.
The data is processed in chunks and then sent to VM.

The importing process example for a local installation of Prometheus
and single-node VictoriaMetrics (`http://localhost:8428`):
```
./vmctl prometheus --prom-snapshot=/path/to/snapshot \
  --vm-concurrency=1 \
  --vm-batch-size=200000 \
  --prom-concurrency=3
Prometheus import mode
Prometheus snapshot stats:
  blocks found: 14;
  blocks skipped: 0;
  min time: 1581288163058 (2020-02-09T22:42:43Z);
  max time: 1582409128139 (2020-02-22T22:05:28Z);
  samples: 32549106;
  series: 27289.
Found 14 blocks to import. Continue? [Y/n] y
14 / 14 [-------------------------------------------------------------------------------------------] 100.00% 0 p/s
2020/02/23 15:50:03 Import finished!
2020/02/23 15:50:03 VictoriaMetrics importer stats:
  idle duration: 6.152953029s;
  time spent while importing: 44.908522491s;
  total samples: 32549106;
  samples/s: 724786.84;
  total bytes: 669.1 MB;
  bytes/s: 14.9 MB;
  import requests: 323;
  import requests retries: 0;
2020/02/23 15:50:03 Total time: 51.077451066s
```

### Data mapping

VictoriaMetrics has a very similar data model to Prometheus and supports [RemoteWrite integration](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
So no data changes will be applied.

### Configuration

The configuration flags should contain self-explanatory descriptions.

### Filtering

The filtering consists of two parts: by timeseries and by time.

Filtering by time may be configured via flags `--prom-filter-time-start` and `--prom-filter-time-end`
in RFC3339 format. This filter is applied twice: to drop blocks out of range and to filter timeseries in blocks with
overlapping time range.

Example of applying the time filter:
```
./vmctl prometheus --prom-snapshot=/path/to/snapshot \
  --prom-filter-time-start=2020-02-07T00:07:01Z \
  --prom-filter-time-end=2020-02-11T00:07:01Z
Prometheus import mode
Prometheus snapshot stats:
  blocks found: 2;
  blocks skipped: 12;
  min time: 1581288163058 (2020-02-09T22:42:43Z);
  max time: 1581328800000 (2020-02-10T10:00:00Z);
  samples: 1657698;
  series: 3930.
Found 2 blocks to import. Continue? [Y/n] y
```

Please notice that the total amount of blocks in the provided snapshot is 14, but only 2 of them were in the provided
time range. The other 12 blocks were marked as `skipped`. The amount of samples and series is not taken into account,
since this is a heavy operation and will be done during the import process.

Filtering by timeseries is configured with the following flags:
* `--prom-filter-label` - the label name, e.g. `__name__` or `instance`;
* `--prom-filter-label-value` - the regular expression to filter the label value. By default it matches all: `.*`

For example:
```
./vmctl prometheus --prom-snapshot=/path/to/snapshot \
  --prom-filter-label="__name__" \
  --prom-filter-label-value="promhttp.*" \
  --prom-filter-time-start=2020-02-07T00:07:01Z \
  --prom-filter-time-end=2020-02-11T00:07:01Z
Prometheus import mode
Prometheus snapshot stats:
  blocks found: 2;
  blocks skipped: 12;
  min time: 1581288163058 (2020-02-09T22:42:43Z);
  max time: 1581328800000 (2020-02-10T10:00:00Z);
  samples: 1657698;
  series: 3930.
Found 2 blocks to import. Continue? [Y/n] y
14 / 14 [------------------------------------------------------------------------------------------------------------------------------------------------------] 100.00% ? p/s
2020/02/23 15:51:07 Import finished!
2020/02/23 15:51:07 VictoriaMetrics importer stats:
  idle duration: 0s;
  time spent while importing: 37.415461ms;
  total samples: 10128;
  samples/s: 270690.24;
  total bytes: 195.2 kB;
  bytes/s: 5.2 MB;
  import requests: 2;
  import requests retries: 0;
2020/02/23 15:51:07 Total time: 7.153158218s
```

## Migrating data from Thanos

Thanos uses the same storage engine as Prometheus and the data layout on-disk should be the same. That means
`vmctl` in mode `prometheus` may be used for Thanos historical data migration as well.
These instructions may vary based on the details of your Thanos configuration.
Please read carefully and verify as you go. We assume you're using Thanos Sidecar on your Prometheus pods,
and that you have a separate Thanos Store installation.

### Current data

1. For now, keep your Thanos Sidecar and Thanos-related Prometheus configuration, but add this to also stream
   metrics to VictoriaMetrics:
   ```
   remote_write:
   - url: http://victoria-metrics:8428/api/v1/write
   ```
2. Make sure VM is running, of course. Now check the logs to make sure that Prometheus is sending and VM is receiving.
   In Prometheus, make sure there are no errors. On the VM side, you should see messages like this:
   ```
   2020-04-27T18:38:46.474Z info VictoriaMetrics/lib/storage/partition.go:207 creating a partition "2020_04" with smallPartsPath="/victoria-metrics-data/data/small/2020_04", bigPartsPath="/victoria-metrics-data/data/big/2020_04"
   2020-04-27T18:38:46.506Z info VictoriaMetrics/lib/storage/partition.go:222 partition "2020_04" has been created
   ```
3. Now just wait. Within two hours, Prometheus should finish its current data file and hand it off to Thanos Store for long term
   storage.

### Historical data

Let's assume your data is stored on S3 served by minio. You first need to copy that out to a local filesystem,
then import it into VM using `vmctl` in `prometheus` mode.
1. Copy data from minio.
   1. Run the `minio/mc` Docker container.
   1. `mc config host add minio http://minio:9000 accessKey secretKey`, substituting appropriate values for the last 3 items.
   1. `mc cp -r minio/prometheus thanos-data`
1. Import using `vmctl`.
   1. Follow the [instructions](#how-to-build) to compile `vmctl` on your machine.
   1. Use [prometheus](#migrating-data-from-prometheus) mode to import data:
      ```
      vmctl prometheus --prom-snapshot thanos-data --vm-addr http://victoria-metrics:8428
      ```

## Migrating data from VictoriaMetrics

### Native protocol

The [native binary protocol](https://victoriametrics.github.io/#how-to-export-data-in-native-format)
was introduced in [1.42.0 release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.42.0)
and provides the most efficient way to migrate data between VM instances: single to single, cluster to cluster,
single to cluster and vice versa. Please note that both instances (source and destination) should be of v1.42.0
or higher.

See `./vmctl vm-native --help` for details and the full list of flags.

In this mode `vmctl` acts as a proxy between two VM instances, where time series filtering is done by the "source" (`src`)
and processing is done by the "destination" (`dst`). Because of that, `vmctl` doesn't actually know how much data will be
processed and can't show the progress bar. It will show the current processing speed and total number of processed bytes:

```
./vmctl vm-native --vm-native-src-addr=http://localhost:8528 \
  --vm-native-dst-addr=http://localhost:8428 \
  --vm-native-filter-match='{job="vmagent"}' \
  --vm-native-filter-time-start='2020-01-01T20:07:00Z'
VictoriaMetrics Native import mode
Initing export pipe from "http://localhost:8528" with filters:
  filter: match[]={job="vmagent"}
Initing import process to "http://localhost:8428":
Total: 336.75 KiB ↖ Speed: 454.46 KiB p/s
2020/10/13 17:04:59 Total time: 952.143376ms
```
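
Conceptually, the proxying is a stream from the source's export endpoint into the destination's import endpoint. A simplified Go sketch of that idea (auth, retries and URL escaping omitted; this is not vmctl's actual code):

```go
package main

import (
	"fmt"
	"net/http"
)

// pipeNative streams /api/v1/export/native output from src directly into
// /api/v1/import/native on dst, so nothing is buffered on disk in between.
func pipeNative(src, dst, match string) error {
	resp, err := http.Get(src + "/api/v1/export/native?match[]=" + match)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// The export response body is fed straight in as the import request body.
	importResp, err := http.Post(dst+"/api/v1/import/native", "application/octet-stream", resp.Body)
	if err != nil {
		return err
	}
	defer importResp.Body.Close()
	if importResp.StatusCode != http.StatusNoContent && importResp.StatusCode != http.StatusOK {
		return fmt.Errorf("import failed with status %d", importResp.StatusCode)
	}
	return nil
}

func main() {
	// URL-escape the match expression in real usage.
	if err := pipeNative("http://localhost:8528", "http://localhost:8428", `{job="vmagent"}`); err != nil {
		panic(err)
	}
}
```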

Importing tips:
1. Migrating all the metrics from one VM to another may collide with existing application metrics
(prefixed with `vm_`) at the destination and lead to confusion when using
[official Grafana dashboards](https://grafana.com/orgs/victoriametrics/dashboards).
To avoid such a situation, try to filter out VM process metrics via the `--vm-native-filter-match` flag.
2. Migration is a backfilling process, so it is recommended to read the
[Backfilling tips](https://github.com/VictoriaMetrics/VictoriaMetrics#backfilling) section.
3. `vmctl` doesn't provide relabeling or other types of labels management in this mode.
Instead, use [relabeling in VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl/issues/4#issuecomment-683424375).

## Tuning

### Influx mode

The flag `--influx-concurrency` controls how many concurrent requests may be sent to InfluxDB while fetching
timeseries. Please set it wisely to avoid overwhelming InfluxDB.

The flag `--influx-chunk-size` controls the max amount of datapoints to return in a single chunk from fetch requests.
Please see more details [here](https://docs.influxdata.com/influxdb/v1.7/guides/querying_data/#chunking).
The chunk size is used to control InfluxDB memory usage, so it won't OOM on processing large timeseries with
billions of datapoints.
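
For reference, chunking is requested on the InfluxDB client side. A hypothetical Go sketch using the same `client/v2` package that vmctl imports (query and database names are examples, not vmctl's code):

```go
package main

import (
	"fmt"

	influx "github.com/influxdata/influxdb/client/v2"
)

func main() {
	hc, err := influx.NewHTTPClient(influx.HTTPConfig{Addr: "http://localhost:8086"})
	if err != nil {
		panic(err)
	}
	defer hc.Close()

	// Ask InfluxDB to stream results in chunks of 10k points so the server
	// doesn't have to materialize a huge series in memory at once.
	q := influx.Query{
		Command:   `select "value" from "cpu"`,
		Database:  "benchmark",
		Chunked:   true,
		ChunkSize: 10e3,
	}
	resp, err := hc.Query(q)
	if err != nil {
		panic(err)
	}
	if resp.Error() != nil {
		panic(resp.Error())
	}
	fmt.Println("result sets:", len(resp.Results))
}
```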

### Prometheus mode

The flag `--prom-concurrency` controls how many concurrent readers will be reading the blocks in the snapshot.
Since snapshots are just files on disk, it would be hard to overwhelm the system. Please go with a value equal
to the number of free CPU cores.

### VictoriaMetrics importer

The flag `--vm-concurrency` controls the number of concurrent workers that process the input from InfluxDB query results.
Please note that each import request can load up to a single vCPU core on VictoriaMetrics. So try to set it according
to the allocated CPU resources of your VictoriaMetrics installation.

The flag `--vm-batch-size` controls the max amount of samples collected before sending the import request.
For example, if `--influx-chunk-size=500` and `--vm-batch-size=2000`, then the importer will process no more
than 4 chunks before sending the request.

### Importer stats

After a successful import `vmctl` prints some statistics for details.
The important numbers to watch are the following:
- `idle duration` - shows the time the importer spent while waiting for data from InfluxDB/Prometheus
to fill up the `--vm-batch-size` batch size. The value shows the total duration across all workers configured
via `--vm-concurrency`. A high value may be a sign of too slow InfluxDB/Prometheus fetches or a too
high `--vm-concurrency` value. Try to improve it by increasing the `--<mode>-concurrency` value or
decreasing the `--vm-concurrency` value.
- `import requests` - shows how many import requests were issued to the VM server.
An import request is issued once the batch size (`--vm-batch-size`) is full and ready to be sent.
Please prefer big batch sizes (50k-500k) to improve performance.
- `import requests retries` - shows the number of unsuccessful import requests. A non-zero value may be
a sign of network issues or of VM being overloaded. See the logs during import for error messages.

### Silent mode

By default `vmctl` waits for confirmation from the user before starting the import. If this is unwanted
behavior and no user interaction is required - pass the `-s` flag to enable "silence" mode:
```
-s Whether to run in silent mode. If set to true no confirmation prompts will appear. (default: false)
```

### Significant figures

`vmctl` allows limiting the number of [significant figures](https://en.wikipedia.org/wiki/Significant_figures)
before importing. For example, the average value for response size is `102.342305` bytes and it has 9 significant figures.
If you ask a human to pronounce this value, then with high probability the value will be rounded to the first 4 or 5 figures
because the rest aren't really that important to mention. In most cases, such high precision is too much.
Moreover, such values may be just a result of [floating point arithmetic](https://en.wikipedia.org/wiki/Floating-point_arithmetic),
create a [false precision](https://en.wikipedia.org/wiki/False_precision) and result in a bad compression ratio
according to [information theory](https://en.wikipedia.org/wiki/Information_theory).

`vmctl` provides the following flags for improving data compression:

* `--vm-round-digits` flag for rounding processed values to the given number of decimal digits after the point.
For example, `--vm-round-digits=2` would round `1.2345` to `1.23`. By default the rounding is disabled.

* `--vm-significant-figures` flag for limiting the number of significant figures in processed values. It takes no effect if set
to 0 (by default), but set `--vm-significant-figures=5` and `102.342305` will be rounded to `102.34`.

The most common case for using these flags is to improve data compression for time series storing aggregation
results such as `average`, `rate`, etc.
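
One common way to implement such rounding is to scale by the value's order of magnitude. The Go sketch below reproduces the `102.342305` -> `102.34` example; it is illustrative and not vmctl's implementation:

```go
package main

import (
	"fmt"
	"math"
)

// roundToSignificantFigures keeps n significant figures of v,
// e.g. n=5 turns 102.342305 into 102.34 as in the example above.
func roundToSignificantFigures(v float64, n int) float64 {
	if v == 0 || n <= 0 {
		return v
	}
	// magnitude is the number of digits before the decimal point.
	magnitude := math.Ceil(math.Log10(math.Abs(v)))
	scale := math.Pow(10, float64(n)-magnitude)
	return math.Round(v*scale) / scale
}

func main() {
	fmt.Println(roundToSignificantFigures(102.342305, 5)) // 102.34
	fmt.Println(roundToSignificantFigures(0.00123456, 3)) // 0.00123
}
```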

### Adding extra labels

`vmctl` allows adding extra labels to all imported series. It can be achieved with the flag `--vm-extra-label label=value`.
If multiple labels need to be added, set the flag for each label, for example, `--vm-extra-label label1=value1 --vm-extra-label label2=value2`.
If a timeseries already has a label that is also added with the `--vm-extra-label` flag, the flag has priority and will override the label value from the timeseries.
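
The override semantics can be pictured with a short Go sketch; `applyExtraLabels` is a hypothetical helper, not part of vmctl's API:

```go
package main

import "fmt"

// applyExtraLabels merges extra labels into a series' label set; on name
// collision the extra label wins, matching the --vm-extra-label semantics
// described above.
func applyExtraLabels(labels, extra map[string]string) map[string]string {
	out := make(map[string]string, len(labels)+len(extra))
	for k, v := range labels {
		out[k] = v
	}
	for k, v := range extra { // extra labels override existing values
		out[k] = v
	}
	return out
}

func main() {
	series := map[string]string{"job": "node", "env": "staging"}
	extra := map[string]string{"env": "prod", "dc": "eu-1"}
	fmt.Println(applyExtraLabels(series, extra)) // map[dc:eu-1 env:prod job:node]
}
```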

6
app/vmctl/deployment/Dockerfile
Normal file
@ -0,0 +1,6 @@
ARG base_image
FROM $base_image

ENTRYPOINT ["/vmctl-prod"]
ARG src_binary
COPY $src_binary ./vmctl-prod

292
app/vmctl/flags.go
Normal file
@ -0,0 +1,292 @@
package main

import (
	"fmt"

	"github.com/urfave/cli/v2"
)

const (
	globalSilent = "s"
)

var (
	globalFlags = []cli.Flag{
		&cli.BoolFlag{
			Name:  globalSilent,
			Value: false,
			Usage: "Whether to run in silent mode. If set to true no confirmation prompts will appear.",
		},
	}
)

const (
	vmAddr               = "vm-addr"
	vmUser               = "vm-user"
	vmPassword           = "vm-password"
	vmAccountID          = "vm-account-id"
	vmConcurrency        = "vm-concurrency"
	vmCompress           = "vm-compress"
	vmBatchSize          = "vm-batch-size"
	vmSignificantFigures = "vm-significant-figures"
	vmRoundDigits        = "vm-round-digits"
	vmExtraLabel         = "vm-extra-label"
)

var (
	vmFlags = []cli.Flag{
		&cli.StringFlag{
			Name:  vmAddr,
			Value: "http://localhost:8428",
			Usage: "VictoriaMetrics address to perform import requests. \n" +
				"Should be the same as --httpListenAddr value for single-node version or VMInsert component. \n" +
				"Please note, that `vmctl` performs initial readiness check for the given address by checking `/health` endpoint.",
		},
		&cli.StringFlag{
			Name:    vmUser,
			Usage:   "VictoriaMetrics username for basic auth",
			EnvVars: []string{"VM_USERNAME"},
		},
		&cli.StringFlag{
			Name:    vmPassword,
			Usage:   "VictoriaMetrics password for basic auth",
			EnvVars: []string{"VM_PASSWORD"},
		},
		&cli.StringFlag{
			Name: vmAccountID,
			Usage: "AccountID is an arbitrary 32-bit integer identifying namespace for data ingestion (aka tenant). \n" +
				"It is possible to set it as accountID:projectID, where projectID is also arbitrary 32-bit integer. \n" +
				"If projectID isn't set, then it equals to 0",
		},
		&cli.UintFlag{
			Name:  vmConcurrency,
			Usage: "Number of workers concurrently performing import requests to VM",
			Value: 2,
		},
		&cli.BoolFlag{
			Name:  vmCompress,
			Value: true,
			Usage: "Whether to apply gzip compression to import requests",
		},
		&cli.IntFlag{
			Name:  vmBatchSize,
			Value: 200e3,
			Usage: "How many samples importer collects before sending the import request to VM",
		},
		&cli.IntFlag{
			Name:  vmSignificantFigures,
			Value: 0,
			Usage: "The number of significant figures to leave in metric values before importing. " +
				"See https://en.wikipedia.org/wiki/Significant_figures. Zero value saves all the significant figures. " +
				"This option may be used for increasing on-disk compression level for the stored metrics. " +
				"See also --vm-round-digits option",
		},
		&cli.IntFlag{
			Name:  vmRoundDigits,
			Value: 100,
			Usage: "Round metric values to the given number of decimal digits after the point. " +
				"This option may be used for increasing on-disk compression level for the stored metrics",
		},
		&cli.StringSliceFlag{
			Name:  vmExtraLabel,
			Value: nil,
			Usage: "Extra labels, that will be added to imported timeseries. In case of collision, label value defined by flag " +
				"will have priority. Flag can be set multiple times, to add few additional labels.",
		},
	}
)

const (
	influxAddr                      = "influx-addr"
	influxUser                      = "influx-user"
	influxPassword                  = "influx-password"
	influxDB                        = "influx-database"
	influxRetention                 = "influx-retention-policy"
	influxChunkSize                 = "influx-chunk-size"
	influxConcurrency               = "influx-concurrency"
	influxFilterSeries              = "influx-filter-series"
	influxFilterTimeStart           = "influx-filter-time-start"
	influxFilterTimeEnd             = "influx-filter-time-end"
	influxMeasurementFieldSeparator = "influx-measurement-field-separator"
)

var (
	influxFlags = []cli.Flag{
		&cli.StringFlag{
			Name:  influxAddr,
			Value: "http://localhost:8086",
			Usage: "Influx server addr",
		},
		&cli.StringFlag{
			Name:    influxUser,
			Usage:   "Influx user",
			EnvVars: []string{"INFLUX_USERNAME"},
		},
		&cli.StringFlag{
			Name:    influxPassword,
			Usage:   "Influx user password",
			EnvVars: []string{"INFLUX_PASSWORD"},
		},
		&cli.StringFlag{
			Name:     influxDB,
			Usage:    "Influx database",
			Required: true,
		},
		&cli.StringFlag{
			Name:  influxRetention,
			Usage: "Influx retention policy",
			Value: "autogen",
		},
		&cli.IntFlag{
			Name:  influxChunkSize,
			Usage: "The chunkSize defines max amount of series to be returned in one chunk",
			Value: 10e3,
		},
		&cli.IntFlag{
			Name:  influxConcurrency,
			Usage: "Number of concurrently running fetch queries to InfluxDB",
			Value: 1,
		},
		&cli.StringFlag{
			Name: influxFilterSeries,
			Usage: "Influx filter expression to select series. E.g. \"from cpu where arch='x86' AND hostname='host_2753'\".\n" +
				"See for details https://docs.influxdata.com/influxdb/v1.7/query_language/schema_exploration#show-series",
		},
		&cli.StringFlag{
			Name:  influxFilterTimeStart,
			Usage: "The time filter to select timeseries with timestamp equal or higher than provided value. E.g. '2020-01-01T20:07:00Z'",
		},
		&cli.StringFlag{
			Name:  influxFilterTimeEnd,
			Usage: "The time filter to select timeseries with timestamp equal or lower than provided value. E.g. '2020-01-01T20:07:00Z'",
		},
		&cli.StringFlag{
			Name:  influxMeasurementFieldSeparator,
			Usage: "The {separator} symbol used to concatenate {measurement} and {field} names into series name {measurement}{separator}{field}.",
			Value: "_",
		},
	}
)

const (
	promSnapshot         = "prom-snapshot"
	promConcurrency      = "prom-concurrency"
	promFilterTimeStart  = "prom-filter-time-start"
	promFilterTimeEnd    = "prom-filter-time-end"
	promFilterLabel      = "prom-filter-label"
	promFilterLabelValue = "prom-filter-label-value"
)

var (
	promFlags = []cli.Flag{
		&cli.StringFlag{
			Name:     promSnapshot,
			Usage:    "Path to Prometheus snapshot. Pls see for details https://www.robustperception.io/taking-snapshots-of-prometheus-data",
			Required: true,
		},
		&cli.IntFlag{
			Name:  promConcurrency,
			Usage: "Number of concurrently running snapshot readers",
			Value: 1,
		},
		&cli.StringFlag{
			Name:  promFilterTimeStart,
			Usage: "The time filter in RFC3339 format to select timeseries with timestamp equal or higher than provided value. E.g. '2020-01-01T20:07:00Z'",
		},
		&cli.StringFlag{
			Name:  promFilterTimeEnd,
			Usage: "The time filter in RFC3339 format to select timeseries with timestamp equal or lower than provided value. E.g. '2020-01-01T20:07:00Z'",
		},
		&cli.StringFlag{
			Name:  promFilterLabel,
			Usage: "Prometheus label name to filter timeseries by. E.g. '__name__' will filter timeseries by name.",
		},
		&cli.StringFlag{
			Name:  promFilterLabelValue,
			Usage: fmt.Sprintf("Prometheus regular expression to filter label from %q flag.", promFilterLabel),
			Value: ".*",
		},
	}
)

const (
	vmNativeFilterMatch     = "vm-native-filter-match"
	vmNativeFilterTimeStart = "vm-native-filter-time-start"
	vmNativeFilterTimeEnd   = "vm-native-filter-time-end"

	vmNativeSrcAddr     = "vm-native-src-addr"
	vmNativeSrcUser     = "vm-native-src-user"
	vmNativeSrcPassword = "vm-native-src-password"

	vmNativeDstAddr     = "vm-native-dst-addr"
	vmNativeDstUser     = "vm-native-dst-user"
	vmNativeDstPassword = "vm-native-dst-password"
)

var (
	vmNativeFlags = []cli.Flag{
		&cli.StringFlag{
			Name: vmNativeFilterMatch,
			Usage: "Time series selector to match series for export. For example, select {instance!=\"localhost\"} will " +
				"match all series with \"instance\" label different to \"localhost\".\n" +
				" See more details here https://github.com/VictoriaMetrics/VictoriaMetrics#how-to-export-data-in-native-format",
			Value: `{__name__!=""}`,
		},
		&cli.StringFlag{
			Name:  vmNativeFilterTimeStart,
			Usage: "The time filter may contain either unix timestamp in seconds or RFC3339 values. E.g. '2020-01-01T20:07:00Z'",
		},
		&cli.StringFlag{
			Name:  vmNativeFilterTimeEnd,
			Usage: "The time filter may contain either unix timestamp in seconds or RFC3339 values. E.g. '2020-01-01T20:07:00Z'",
		},
		&cli.StringFlag{
			Name: vmNativeSrcAddr,
			Usage: "VictoriaMetrics address to perform export from. \n" +
				" Should be the same as --httpListenAddr value for single-node version or VMSelect component." +
				" If exporting from cluster version - include the tenant token in address.",
			Required: true,
		},
		&cli.StringFlag{
			Name:    vmNativeSrcUser,
			Usage:   "VictoriaMetrics username for basic auth",
			EnvVars: []string{"VM_NATIVE_SRC_USERNAME"},
		},
		&cli.StringFlag{
			Name:    vmNativeSrcPassword,
			Usage:   "VictoriaMetrics password for basic auth",
			EnvVars: []string{"VM_NATIVE_SRC_PASSWORD"},
		},
		&cli.StringFlag{
			Name: vmNativeDstAddr,
			Usage: "VictoriaMetrics address to perform import to. \n" +
				" Should be the same as --httpListenAddr value for single-node version or VMInsert component." +
				" If importing into cluster version - include the tenant token in address.",
			Required: true,
		},
		&cli.StringFlag{
			Name:    vmNativeDstUser,
			Usage:   "VictoriaMetrics username for basic auth",
			EnvVars: []string{"VM_NATIVE_DST_USERNAME"},
		},
		&cli.StringFlag{
			Name:    vmNativeDstPassword,
			Usage:   "VictoriaMetrics password for basic auth",
			EnvVars: []string{"VM_NATIVE_DST_PASSWORD"},
		},
		&cli.StringSliceFlag{
			Name:  vmExtraLabel,
			Value: nil,
			Usage: "Extra labels, that will be added to imported timeseries. In case of collision, label value defined by flag " +
				"will have priority. Flag can be set multiple times, to add few additional labels.",
		},
	}
)

func mergeFlags(flags ...[]cli.Flag) []cli.Flag {
	var result []cli.Flag
	for _, f := range flags {
		result = append(result, f...)
	}
	return result
}

146
app/vmctl/influx.go
Normal file
@ -0,0 +1,146 @@
package main

import (
	"fmt"
	"io"
	"log"
	"sync"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
	"github.com/cheggaaa/pb/v3"
)

type influxProcessor struct {
	ic        *influx.Client
	im        *vm.Importer
	cc        int
	separator string
}

func newInfluxProcessor(ic *influx.Client, im *vm.Importer, cc int, separator string) *influxProcessor {
	if cc < 1 {
		cc = 1
	}
	return &influxProcessor{
		ic:        ic,
		im:        im,
		cc:        cc,
		separator: separator,
	}
}

func (ip *influxProcessor) run(silent bool) error {
	series, err := ip.ic.Explore()
	if err != nil {
		return fmt.Errorf("explore query failed: %s", err)
	}
	if len(series) < 1 {
		return fmt.Errorf("found no timeseries to import")
	}

	question := fmt.Sprintf("Found %d timeseries to import. Continue?", len(series))
	if !silent && !prompt(question) {
		return nil
	}

	bar := pb.StartNew(len(series))
	seriesCh := make(chan *influx.Series)
	errCh := make(chan error)
	ip.im.ResetStats()

	var wg sync.WaitGroup
	wg.Add(ip.cc)
	for i := 0; i < ip.cc; i++ {
		go func() {
			defer wg.Done()
			for s := range seriesCh {
				if err := ip.do(s); err != nil {
					errCh <- fmt.Errorf("request failed for %q.%q: %s", s.Measurement, s.Field, err)
					return
				}
				bar.Increment()
			}
		}()
	}

	// any error breaks the import
	for _, s := range series {
		select {
		case infErr := <-errCh:
			return fmt.Errorf("influx error: %s", infErr)
		case vmErr := <-ip.im.Errors():
			return fmt.Errorf("Import process failed: \n%s", wrapErr(vmErr))
		case seriesCh <- s:
		}
	}

	close(seriesCh)
	wg.Wait()
	ip.im.Close()
	// drain import errors channel
	for vmErr := range ip.im.Errors() {
		return fmt.Errorf("Import process failed: \n%s", wrapErr(vmErr))
	}
	bar.Finish()
	log.Println("Import finished!")
	log.Print(ip.im.Stats())
	return nil
}

const dbLabel = "db"

func (ip *influxProcessor) do(s *influx.Series) error {
	cr, err := ip.ic.FetchDataPoints(s)
	if err != nil {
		return fmt.Errorf("failed to fetch datapoints: %s", err)
	}
	defer func() {
		_ = cr.Close()
	}()
	var name string
	if s.Measurement != "" {
		name = fmt.Sprintf("%s%s%s", s.Measurement, ip.separator, s.Field)
	} else {
		name = s.Field
	}

	labels := make([]vm.LabelPair, len(s.LabelPairs))
	var containsDBLabel bool
	for i, lp := range s.LabelPairs {
		if lp.Name == dbLabel {
			containsDBLabel = true
			break
		}
		labels[i] = vm.LabelPair{
			Name:  lp.Name,
			Value: lp.Value,
		}
	}
	if !containsDBLabel {
		labels = append(labels, vm.LabelPair{
			Name:  dbLabel,
			Value: ip.ic.Database(),
		})
	}

	for {
		time, values, err := cr.Next()
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		// skip empty results
		if len(time) < 1 {
			continue
		}
		ip.im.Input() <- &vm.TimeSeries{
			Name:       name,
			LabelPairs: labels,
			Timestamps: time,
			Values:     values,
		}
	}
}

362
app/vmctl/influx/influx.go
Normal file
@ -0,0 +1,362 @@
|
|||
package influx

import (
	"fmt"
	"io"
	"log"
	"strings"
	"time"

	influx "github.com/influxdata/influxdb/client/v2"
)

// Client represents a wrapper over
// influx HTTP client
type Client struct {
	influx.Client

	database  string
	retention string
	chunkSize int

	filterSeries string
	filterTime   string
}

// Config contains fields required
// for Client configuration
type Config struct {
	Addr      string
	Username  string
	Password  string
	Database  string
	Retention string
	ChunkSize int

	Filter Filter
}

// Filter contains configuration for filtering
// the timeseries
type Filter struct {
	Series    string
	TimeStart string
	TimeEnd   string
}

// Series holds the time series
type Series struct {
	Measurement string
	Field       string
	LabelPairs  []LabelPair
}

var valueEscaper = strings.NewReplacer(`\`, `\\`, `'`, `\'`)

func (s Series) fetchQuery(timeFilter string) string {
	f := &strings.Builder{}
	fmt.Fprintf(f, "select %q from %q", s.Field, s.Measurement)
	if len(s.LabelPairs) > 0 || len(timeFilter) > 0 {
		f.WriteString(" where")
	}
	for i, pair := range s.LabelPairs {
		pairV := valueEscaper.Replace(pair.Value)
		fmt.Fprintf(f, " %q='%s'", pair.Name, pairV)
		if i != len(s.LabelPairs)-1 {
			f.WriteString(" and")
		}
	}
	if len(timeFilter) > 0 {
		if len(s.LabelPairs) > 0 {
			f.WriteString(" and")
		}
		fmt.Fprintf(f, " %s", timeFilter)
	}
	return f.String()
}

// LabelPair is the key-value record
// of time series label
type LabelPair struct {
	Name  string
	Value string
}

// NewClient creates and returns influx client
// configured with passed Config
func NewClient(cfg Config) (*Client, error) {
	c := influx.HTTPConfig{
		Addr:               cfg.Addr,
		Username:           cfg.Username,
		Password:           cfg.Password,
		InsecureSkipVerify: true,
	}
	hc, err := influx.NewHTTPClient(c)
	if err != nil {
		return nil, fmt.Errorf("failed to establish conn: %s", err)
	}
	if _, _, err := hc.Ping(time.Second); err != nil {
		return nil, fmt.Errorf("ping failed: %s", err)
	}

	chunkSize := cfg.ChunkSize
	if chunkSize < 1 {
		chunkSize = 10e3
	}

	client := &Client{
		Client:       hc,
		database:     cfg.Database,
		retention:    cfg.Retention,
		chunkSize:    chunkSize,
		filterTime:   timeFilter(cfg.Filter.TimeStart, cfg.Filter.TimeEnd),
		filterSeries: cfg.Filter.Series,
	}
	return client, nil
}

// Database returns database name
func (c Client) Database() string {
	return c.database
}

func timeFilter(start, end string) string {
	if start == "" && end == "" {
		return ""
	}
	var tf string
	if start != "" {
		tf = fmt.Sprintf("time >= '%s'", start)
	}
	if end != "" {
		if tf != "" {
			tf += " and "
		}
		tf += fmt.Sprintf("time <= '%s'", end)
	}
	return tf
}

// Explore checks the existing data schema in influx
// by checking available fields and series,
// whose unique combinations represent all possible
// time series existing in the database.
// The explore phase is required to reduce the load on influx
// by querying the fields of one exact time series at a time,
// instead of fetching all of the values over and over.
//
// May contain non-existing time series.
func (c *Client) Explore() ([]*Series, error) {
	log.Printf("Exploring schema for database %q", c.database)
	mFields, err := c.fieldsByMeasurement()
	if err != nil {
		return nil, fmt.Errorf("failed to get field keys: %s", err)
	}

	series, err := c.getSeries()
	if err != nil {
		return nil, fmt.Errorf("failed to get series: %s", err)
	}

	var iSeries []*Series
	for _, s := range series {
		fields, ok := mFields[s.Measurement]
		if !ok {
			return nil, fmt.Errorf("can't find field keys for measurement %q", s.Measurement)
		}
		for _, field := range fields {
			is := &Series{
				Measurement: s.Measurement,
				Field:       field,
				LabelPairs:  s.LabelPairs,
			}
			iSeries = append(iSeries, is)
		}
	}
	return iSeries, nil
}

// ChunkedResponse is a wrapper over influx.ChunkedResponse.
// Used for better memory usage control while iterating
// over huge time series.
type ChunkedResponse struct {
	cr    *influx.ChunkedResponse
	iq    influx.Query
	field string
}

// Close closes cr.
func (cr *ChunkedResponse) Close() error {
	return cr.cr.Close()
}

// Next reads the next part/chunk of time series.
// Returns io.EOF when time series was read entirely.
func (cr *ChunkedResponse) Next() ([]int64, []float64, error) {
	resp, err := cr.cr.NextResponse()
	if err != nil {
		return nil, nil, err
	}
	if resp.Error() != nil {
		return nil, nil, fmt.Errorf("response error for %s: %s", cr.iq.Command, resp.Error())
	}
	if len(resp.Results) != 1 {
		return nil, nil, fmt.Errorf("unexpected number of results in response: %d", len(resp.Results))
	}
	results, err := parseResult(resp.Results[0])
	if err != nil {
		return nil, nil, err
	}
	if len(results) < 1 {
		return nil, nil, nil
	}
	r := results[0]

	const key = "time"
	timestamps, ok := r.values[key]
	if !ok {
		return nil, nil, fmt.Errorf("response doesn't contain field %q", key)
	}

	fieldValues, ok := r.values[cr.field]
	if !ok {
		return nil, nil, fmt.Errorf("response doesn't contain field %q", cr.field)
	}
	values := make([]float64, len(fieldValues))
	for i, fv := range fieldValues {
		v, err := toFloat64(fv)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to convert value %q.%v to float64: %s",
				cr.field, fv, err)
		}
		values[i] = v
	}

	ts := make([]int64, len(results[0].values[key]))
	for i, v := range timestamps {
		t, err := parseDate(v.(string))
		if err != nil {
			return nil, nil, err
		}
		ts[i] = t
	}
	return ts, values, nil
}

// FetchDataPoints performs SELECT request to fetch
// datapoints for particular field.
func (c *Client) FetchDataPoints(s *Series) (*ChunkedResponse, error) {
	iq := influx.Query{
		Command:         s.fetchQuery(c.filterTime),
		Database:        c.database,
		RetentionPolicy: c.retention,
		Chunked:         true,
		ChunkSize:       1e4,
	}
	cr, err := c.QueryAsChunk(iq)
	if err != nil {
		return nil, fmt.Errorf("query %q err: %s", iq.Command, err)
	}
	return &ChunkedResponse{cr, iq, s.Field}, nil
}

func (c *Client) fieldsByMeasurement() (map[string][]string, error) {
	q := influx.Query{
		Command:         "show field keys",
		Database:        c.database,
		RetentionPolicy: c.retention,
	}
	log.Printf("fetching fields: %s", stringify(q))
	qValues, err := c.do(q)
	if err != nil {
		return nil, fmt.Errorf("error while executing query %q: %s", q.Command, err)
	}

	var total int
	var skipped int
	const fKey = "fieldKey"
	const fType = "fieldType"
	result := make(map[string][]string, len(qValues))
	for _, qv := range qValues {
		types := qv.values[fType]
		fields := qv.values[fKey]
		values := make([]string, 0)
		for key, field := range fields {
			if types[key].(string) == "string" {
				skipped++
				continue
			}
			values = append(values, field.(string))
			total++
		}
		result[qv.name] = values
	}

	if skipped > 0 {
		log.Printf("found %d fields; skipped %d non-numeric fields", total, skipped)
	} else {
		log.Printf("found %d fields", total)
	}
	return result, nil
}

func (c *Client) getSeries() ([]*Series, error) {
	com := "show series"
	if c.filterSeries != "" {
		com = fmt.Sprintf("%s %s", com, c.filterSeries)
	}
	q := influx.Query{
		Command:         com,
		Database:        c.database,
		RetentionPolicy: c.retention,
		Chunked:         true,
		ChunkSize:       c.chunkSize,
	}

	log.Printf("fetching series: %s", stringify(q))
	cr, err := c.QueryAsChunk(q)
	if err != nil {
		return nil, fmt.Errorf("error while executing query %q: %s", q.Command, err)
	}

	const key = "key"
	var result []*Series
	for {
		resp, err := cr.NextResponse()
		if err != nil {
			if err == io.EOF {
				break
			}
			return nil, err
		}
		if resp.Error() != nil {
			return nil, fmt.Errorf("response error for query %q: %s", q.Command, resp.Error())
		}
		qValues, err := parseResult(resp.Results[0])
		if err != nil {
			return nil, err
		}
		for _, qv := range qValues {
			for _, v := range qv.values[key] {
				s := &Series{}
				if err := s.unmarshal(v.(string)); err != nil {
					return nil, err
				}
				result = append(result, s)
			}
		}
	}
	log.Printf("found %d series", len(result))
	return result, nil
}

func (c *Client) do(q influx.Query) ([]queryValues, error) {
	res, err := c.Query(q)
	if err != nil {
		return nil, fmt.Errorf("query %q err: %s", q.Command, err)
	}
	if len(res.Results) < 1 {
		return nil, fmt.Errorf("exploration query %q returned 0 results", q.Command)
	}
	return parseResult(res.Results[0])
}
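Putting the pieces of this file together, migration happens in two phases: Explore() enumerates the measurement/field combinations, then FetchDataPoints()/Next() streams each series chunk by chunk. The sketch below drives that API; the InfluxDB address and database name are placeholder assumptions, and error handling is abbreviated.

package main

import (
	"io"
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx"
)

func main() {
	client, err := influx.NewClient(influx.Config{
		Addr:     "http://localhost:8086", // assumption: local InfluxDB
		Database: "telegraf",              // assumption: example database
	})
	if err != nil {
		log.Fatal(err)
	}

	// Phase 1: discover every measurement/field combination.
	series, err := client.Explore()
	if err != nil {
		log.Fatal(err)
	}

	// Phase 2: stream datapoints for each series chunk by chunk.
	for _, s := range series {
		cr, err := client.FetchDataPoints(s)
		if err != nil {
			log.Fatal(err)
		}
		var points int
		for {
			timestamps, _, err := cr.Next()
			if err == io.EOF {
				break
			}
			if err != nil {
				log.Fatal(err)
			}
			points += len(timestamps)
		}
		_ = cr.Close()
		log.Printf("%s.%s: %d points", s.Measurement, s.Field, points)
	}
}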
127
app/vmctl/influx/influx_test.go
Normal file
@@ -0,0 +1,127 @@
package influx

import "testing"

func TestFetchQuery(t *testing.T) {
	testCases := []struct {
		s          Series
		timeFilter string
		expected   string
	}{
		{
			s: Series{
				Measurement: "cpu",
				Field:       "value",
				LabelPairs: []LabelPair{
					{
						Name:  "foo",
						Value: "bar",
					},
				},
			},
			expected: `select "value" from "cpu" where "foo"='bar'`,
		},
		{
			s: Series{
				Measurement: "cpu",
				Field:       "value",
				LabelPairs: []LabelPair{
					{
						Name:  "foo",
						Value: "bar",
					},
					{
						Name:  "baz",
						Value: "qux",
					},
				},
			},
			expected: `select "value" from "cpu" where "foo"='bar' and "baz"='qux'`,
		},
		{
			s: Series{
				Measurement: "cpu",
				Field:       "value",
				LabelPairs: []LabelPair{
					{
						Name:  "foo",
						Value: "b'ar",
					},
				},
			},
			timeFilter: "time >= now()",
			expected:   `select "value" from "cpu" where "foo"='b\'ar' and time >= now()`,
		},
		{
			s: Series{
				Measurement: "cpu",
				Field:       "value",
				LabelPairs: []LabelPair{
					{
						Name:  "name",
						Value: `dev-mapper-centos\x2dswap.swap`,
					},
					{
						Name:  "state",
						Value: "dev-mapp'er-c'en'tos",
					},
				},
			},
			timeFilter: "time >= now()",
			expected:   `select "value" from "cpu" where "name"='dev-mapper-centos\\x2dswap.swap' and "state"='dev-mapp\'er-c\'en\'tos' and time >= now()`,
		},
		{
			s: Series{
				Measurement: "cpu",
				Field:       "value",
			},
			timeFilter: "time >= now()",
			expected:   `select "value" from "cpu" where time >= now()`,
		},
		{
			s: Series{
				Measurement: "cpu",
				Field:       "value",
			},
			expected: `select "value" from "cpu"`,
		},
	}

	for _, tc := range testCases {
		query := tc.s.fetchQuery(tc.timeFilter)
		if query != tc.expected {
			t.Fatalf("got: \n%s;\nexpected: \n%s", query, tc.expected)
		}
	}
}

func TestTimeFilter(t *testing.T) {
	testCases := []struct {
		start    string
		end      string
		expected string
	}{
		{
			start:    "2020-01-01T20:07:00Z",
			end:      "2020-01-01T21:07:00Z",
			expected: "time >= '2020-01-01T20:07:00Z' and time <= '2020-01-01T21:07:00Z'",
		},
		{
			expected: "",
		},
		{
			start:    "2020-01-01T20:07:00Z",
			expected: "time >= '2020-01-01T20:07:00Z'",
		},
		{
			end:      "2020-01-01T21:07:00Z",
			expected: "time <= '2020-01-01T21:07:00Z'",
		},
	}
	for _, tc := range testCases {
		f := timeFilter(tc.start, tc.end)
		if f != tc.expected {
			t.Fatalf("got: \n%q;\nexpected: \n%q", f, tc.expected)
		}
	}
}
191
app/vmctl/influx/parser.go
Normal file
@@ -0,0 +1,191 @@
package influx

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
	"time"

	influx "github.com/influxdata/influxdb/client/v2"
)

type queryValues struct {
	name   string
	values map[string][]interface{}
}

func parseResult(r influx.Result) ([]queryValues, error) {
	if len(r.Err) > 0 {
		return nil, fmt.Errorf("result error: %s", r.Err)
	}
	qValues := make([]queryValues, len(r.Series))
	for i, row := range r.Series {
		values := make(map[string][]interface{}, len(row.Values))
		for _, value := range row.Values {
			for idx, v := range value {
				key := row.Columns[idx]
				values[key] = append(values[key], v)
			}
		}
		qValues[i] = queryValues{
			name:   row.Name,
			values: values,
		}
	}
	return qValues, nil
}

func toFloat64(v interface{}) (float64, error) {
	switch i := v.(type) {
	case json.Number:
		return i.Float64()
	case float64:
		return i, nil
	case float32:
		return float64(i), nil
	case int64:
		return float64(i), nil
	case int32:
		return float64(i), nil
	case int:
		return float64(i), nil
	case uint64:
		return float64(i), nil
	case uint32:
		return float64(i), nil
	case uint:
		return float64(i), nil
	case string:
		return strconv.ParseFloat(i, 64)
	default:
		return 0, fmt.Errorf("unexpected value type %v", i)
	}
}

func parseDate(dateStr string) (int64, error) {
	startTime, err := time.Parse(time.RFC3339, dateStr)
	if err != nil {
		return 0, fmt.Errorf("cannot parse %q: %s", dateStr, err)
	}
	return startTime.UnixNano() / 1e6, nil
}

func stringify(q influx.Query) string {
	return fmt.Sprintf("command: %q; database: %q; retention: %q",
		q.Command, q.Database, q.RetentionPolicy)
}

func (s *Series) unmarshal(v string) error {
	noEscapeChars := strings.IndexByte(v, '\\') < 0
	n := nextUnescapedChar(v, ',', noEscapeChars)
	if n < 0 {
		s.Measurement = unescapeTagValue(v, noEscapeChars)
		return nil
	}
	s.Measurement = unescapeTagValue(v[:n], noEscapeChars)
	var err error
	s.LabelPairs, err = unmarshalTags(v[n+1:], noEscapeChars)
	if err != nil {
		return fmt.Errorf("failed to unmarshal tags: %s", err)
	}
	return nil
}

func unmarshalTags(s string, noEscapeChars bool) ([]LabelPair, error) {
	var result []LabelPair
	for {
		lp := LabelPair{}
		n := nextUnescapedChar(s, ',', noEscapeChars)
		if n < 0 {
			if err := lp.unmarshal(s, noEscapeChars); err != nil {
				return nil, err
			}
			if len(lp.Name) == 0 || len(lp.Value) == 0 {
				return nil, nil
			}
			result = append(result, lp)
			return result, nil
		}
		if err := lp.unmarshal(s[:n], noEscapeChars); err != nil {
			return nil, err
		}
		s = s[n+1:]
		if len(lp.Name) == 0 || len(lp.Value) == 0 {
			continue
		}
		result = append(result, lp)
	}
}

func (lp *LabelPair) unmarshal(s string, noEscapeChars bool) error {
	n := nextUnescapedChar(s, '=', noEscapeChars)
	if n < 0 {
		return fmt.Errorf("missing tag value for %q", s)
	}
	lp.Name = unescapeTagValue(s[:n], noEscapeChars)
	lp.Value = unescapeTagValue(s[n+1:], noEscapeChars)
	return nil
}

func unescapeTagValue(s string, noEscapeChars bool) string {
	if noEscapeChars {
		// Fast path - no escape chars.
		return s
	}
	n := strings.IndexByte(s, '\\')
	if n < 0 {
		return s
	}

	// Slow path. Remove escape chars.
	dst := make([]byte, 0, len(s))
	for {
		dst = append(dst, s[:n]...)
		s = s[n+1:]
		if len(s) == 0 {
			return string(append(dst, '\\'))
		}
		ch := s[0]
		if ch != ' ' && ch != ',' && ch != '=' && ch != '\\' {
			dst = append(dst, '\\')
		}
		dst = append(dst, ch)
		s = s[1:]
		n = strings.IndexByte(s, '\\')
		if n < 0 {
			return string(append(dst, s...))
		}
	}
}

func nextUnescapedChar(s string, ch byte, noEscapeChars bool) int {
	if noEscapeChars {
		// Fast path: just search for ch in s, since s has no escape chars.
		return strings.IndexByte(s, ch)
	}

	sOrig := s
again:
	n := strings.IndexByte(s, ch)
	if n < 0 {
		return -1
	}
	if n == 0 {
		return len(sOrig) - len(s) + n
	}
	if s[n-1] != '\\' {
		return len(sOrig) - len(s) + n
	}
	nOrig := n
	slashes := 0
	for n > 0 && s[n-1] == '\\' {
		slashes++
		n--
	}
	if slashes&1 == 0 {
		return len(sOrig) - len(s) + nOrig
	}
	s = s[nOrig+1:]
	goto again
}
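Two of the helpers above do the unit conversions the importer relies on: parseDate turns RFC3339 timestamps into Unix milliseconds, and toFloat64 normalizes the dynamically typed values InfluxDB returns. The snippet below is purely illustrative and not part of the original change; since the helpers are unexported, it is written as if it lived in the influx package.

package influx

import (
	"encoding/json"
	"fmt"
)

// exampleConversions is an illustrative snippet (not part of this commit)
// showing what the unexported helpers above produce.
func exampleConversions() {
	ms, _ := parseDate("2020-01-01T20:07:00Z") // RFC3339 -> Unix milliseconds
	v, _ := toFloat64(json.Number("42.5"))     // numeric interface{} -> float64
	fmt.Println(ms, v)                         // prints: 1577909220000 42.5
}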
60
app/vmctl/influx/parser_test.go
Normal file
@@ -0,0 +1,60 @@
package influx

import (
	"reflect"
	"testing"
)

func TestSeries_Unmarshal(t *testing.T) {
	tag := func(name, value string) LabelPair {
		return LabelPair{
			Name:  name,
			Value: value,
		}
	}
	series := func(measurement string, lp ...LabelPair) Series {
		return Series{
			Measurement: measurement,
			LabelPairs:  lp,
		}
	}
	testCases := []struct {
		got  string
		want Series
	}{
		{
			got:  "cpu",
			want: series("cpu"),
		},
		{
			got:  "cpu,host=localhost",
			want: series("cpu", tag("host", "localhost")),
		},
		{
			got:  "cpu,host=localhost,instance=instance",
			want: series("cpu", tag("host", "localhost"), tag("instance", "instance")),
		},
		{
			got:  `fo\,bar\=baz,x\=\b=\\a\,\=\q\ `,
			want: series("fo,bar=baz", tag(`x=\b`, `\a,=\q `)),
		},
		{
			got:  "cpu,host=192.168.0.1,instance=fe80::fdc8:5e36:c2c6:baac%utun1",
			want: series("cpu", tag("host", "192.168.0.1"), tag("instance", "fe80::fdc8:5e36:c2c6:baac%utun1")),
		},
		{
			got: `cpu,db=db1,host=localhost,server=host\=localhost\ user\=user\ `,
			want: series("cpu", tag("db", "db1"),
				tag("host", "localhost"), tag("server", "host=localhost user=user ")),
		},
	}
	for _, tc := range testCases {
		s := Series{}
		if err := s.unmarshal(tc.got); err != nil {
			t.Fatalf("%q: unmarshal err: %s", tc.got, err)
		}
		if !reflect.DeepEqual(s, tc.want) {
			t.Fatalf("%q: expected\n%#v\nto be equal\n%#v", tc.got, s, tc.want)
		}
	}
}
159
app/vmctl/main.go
Normal file
@@ -0,0 +1,159 @@
package main

import (
	"fmt"
	"log"
	"os"
	"os/signal"
	"strings"
	"syscall"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
	"github.com/urfave/cli/v2"
)

func main() {
	start := time.Now()
	app := &cli.App{
		Name:    "vmctl",
		Usage:   "VictoriaMetrics command-line tool",
		Version: buildinfo.Version,
		Commands: []*cli.Command{
			{
				Name:  "influx",
				Usage: "Migrate timeseries from InfluxDB",
				Flags: mergeFlags(globalFlags, influxFlags, vmFlags),
				Action: func(c *cli.Context) error {
					fmt.Println("InfluxDB import mode")

					iCfg := influx.Config{
						Addr:      c.String(influxAddr),
						Username:  c.String(influxUser),
						Password:  c.String(influxPassword),
						Database:  c.String(influxDB),
						Retention: c.String(influxRetention),
						Filter: influx.Filter{
							Series:    c.String(influxFilterSeries),
							TimeStart: c.String(influxFilterTimeStart),
							TimeEnd:   c.String(influxFilterTimeEnd),
						},
						ChunkSize: c.Int(influxChunkSize),
					}
					influxClient, err := influx.NewClient(iCfg)
					if err != nil {
						return fmt.Errorf("failed to create influx client: %s", err)
					}

					vmCfg := initConfigVM(c)
					importer, err := vm.NewImporter(vmCfg)
					if err != nil {
						return fmt.Errorf("failed to create VM importer: %s", err)
					}

					processor := newInfluxProcessor(influxClient, importer,
						c.Int(influxConcurrency), c.String(influxMeasurementFieldSeparator))
					return processor.run(c.Bool(globalSilent))
				},
			},
			{
				Name:  "prometheus",
				Usage: "Migrate timeseries from Prometheus",
				Flags: mergeFlags(globalFlags, promFlags, vmFlags),
				Action: func(c *cli.Context) error {
					fmt.Println("Prometheus import mode")

					vmCfg := initConfigVM(c)
					importer, err := vm.NewImporter(vmCfg)
					if err != nil {
						return fmt.Errorf("failed to create VM importer: %s", err)
					}

					promCfg := prometheus.Config{
						Snapshot: c.String(promSnapshot),
						Filter: prometheus.Filter{
							TimeMin:    c.String(promFilterTimeStart),
							TimeMax:    c.String(promFilterTimeEnd),
							Label:      c.String(promFilterLabel),
							LabelValue: c.String(promFilterLabelValue),
						},
					}
					cl, err := prometheus.NewClient(promCfg)
					if err != nil {
						return fmt.Errorf("failed to create prometheus client: %s", err)
					}
					pp := prometheusProcessor{
						cl: cl,
						im: importer,
						cc: c.Int(promConcurrency),
					}
					return pp.run(c.Bool(globalSilent))
				},
			},
			{
				Name:  "vm-native",
				Usage: "Migrate time series between VictoriaMetrics installations via native binary format",
				Flags: vmNativeFlags,
				Action: func(c *cli.Context) error {
					fmt.Println("VictoriaMetrics Native import mode")

					if c.String(vmNativeFilterMatch) == "" {
						return fmt.Errorf("flag %q can't be empty", vmNativeFilterMatch)
					}

					p := vmNativeProcessor{
						filter: filter{
							match:     c.String(vmNativeFilterMatch),
							timeStart: c.String(vmNativeFilterTimeStart),
							timeEnd:   c.String(vmNativeFilterTimeEnd),
						},
						src: &vmNativeClient{
							addr:     strings.Trim(c.String(vmNativeSrcAddr), "/"),
							user:     c.String(vmNativeSrcUser),
							password: c.String(vmNativeSrcPassword),
						},
						dst: &vmNativeClient{
							addr:        strings.Trim(c.String(vmNativeDstAddr), "/"),
							user:        c.String(vmNativeDstUser),
							password:    c.String(vmNativeDstPassword),
							extraLabels: c.StringSlice(vmExtraLabel),
						},
					}
					return p.run()
				},
			},
		},
	}

	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-c
		fmt.Println("\r- Execution cancelled")
		os.Exit(0)
	}()

	err := app.Run(os.Args)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("Total time: %v", time.Since(start))
}

func initConfigVM(c *cli.Context) vm.Config {
	return vm.Config{
		Addr:               c.String(vmAddr),
		User:               c.String(vmUser),
		Password:           c.String(vmPassword),
		Concurrency:        uint8(c.Int(vmConcurrency)),
		Compress:           c.Bool(vmCompress),
		AccountID:          c.String(vmAccountID),
		BatchSize:          c.Int(vmBatchSize),
		SignificantFigures: c.Int(vmSignificantFigures),
		RoundDigits:        c.Int(vmRoundDigits),
		ExtraLabels:        c.StringSlice(vmExtraLabel),
	}
}
131
app/vmctl/prometheus.go
Normal file
@@ -0,0 +1,131 @@
package main

import (
	"fmt"
	"log"
	"sync"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
	"github.com/cheggaaa/pb/v3"
	"github.com/prometheus/prometheus/tsdb"
)

type prometheusProcessor struct {
	// prometheus client fetches and reads
	// snapshot blocks
	cl *prometheus.Client
	// importer performs import requests
	// for timeseries data returned from
	// snapshot blocks
	im *vm.Importer
	// cc stands for concurrency
	// and defines number of concurrently
	// running snapshot block readers
	cc int
}

func (pp *prometheusProcessor) run(silent bool) error {
	blocks, err := pp.cl.Explore()
	if err != nil {
		return fmt.Errorf("explore failed: %s", err)
	}
	if len(blocks) < 1 {
		return fmt.Errorf("found no blocks to import")
	}
	question := fmt.Sprintf("Found %d blocks to import. Continue?", len(blocks))
	if !silent && !prompt(question) {
		return nil
	}

	bar := pb.StartNew(len(blocks))
	blockReadersCh := make(chan tsdb.BlockReader)
	errCh := make(chan error, pp.cc)
	pp.im.ResetStats()

	var wg sync.WaitGroup
	wg.Add(pp.cc)
	for i := 0; i < pp.cc; i++ {
		go func() {
			defer wg.Done()
			for br := range blockReadersCh {
				if err := pp.do(br); err != nil {
					errCh <- fmt.Errorf("read failed for block %q: %s", br.Meta().ULID, err)
					return
				}
				bar.Increment()
			}
		}()
	}

	// any error breaks the import
	for _, br := range blocks {
		select {
		case promErr := <-errCh:
			close(blockReadersCh)
			return fmt.Errorf("prometheus error: %s", promErr)
		case vmErr := <-pp.im.Errors():
			close(blockReadersCh)
			return fmt.Errorf("import process failed: \n%s", wrapErr(vmErr))
		case blockReadersCh <- br:
		}
	}

	close(blockReadersCh)
	wg.Wait()
	// wait for all buffers to flush
	pp.im.Close()
	// drain import errors channel
	for vmErr := range pp.im.Errors() {
		return fmt.Errorf("import process failed: \n%s", wrapErr(vmErr))
	}
	bar.Finish()
	log.Println("Import finished!")
	log.Print(pp.im.Stats())
	return nil
}

func (pp *prometheusProcessor) do(b tsdb.BlockReader) error {
	ss, err := pp.cl.Read(b)
	if err != nil {
		return fmt.Errorf("failed to read block: %s", err)
	}
	for ss.Next() {
		var name string
		var labels []vm.LabelPair
		series := ss.At()

		for _, label := range series.Labels() {
			if label.Name == "__name__" {
				name = label.Value
				continue
			}
			labels = append(labels, vm.LabelPair{
				Name:  label.Name,
				Value: label.Value,
			})
		}
		if name == "" {
			return fmt.Errorf("failed to find `__name__` label in labelset for block %v", b.Meta().ULID)
		}

		var timestamps []int64
		var values []float64
		it := series.Iterator()
		for it.Next() {
			t, v := it.At()
			timestamps = append(timestamps, t)
			values = append(values, v)
		}
		if err := it.Err(); err != nil {
			return err
		}
		pp.im.Input() <- &vm.TimeSeries{
			Name:       name,
			LabelPairs: labels,
			Timestamps: timestamps,
			Values:     values,
		}
	}
	return ss.Err()
}
147
app/vmctl/prometheus/prometheus.go
Normal file
@@ -0,0 +1,147 @@
package prometheus

import (
	"fmt"
	"time"

	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
)

// Config contains a list of params needed
// for reading Prometheus snapshots
type Config struct {
	// Path to snapshot directory
	Snapshot string

	Filter Filter
}

// Filter contains configuration for filtering
// the timeseries
type Filter struct {
	TimeMin    string
	TimeMax    string
	Label      string
	LabelValue string
}

// Client is a wrapper over Prometheus tsdb.DBReader
type Client struct {
	*tsdb.DBReadOnly
	filter filter
}

type filter struct {
	min, max   int64
	label      string
	labelValue string
}

func (f filter) inRange(min, max int64) bool {
	fmin, fmax := f.min, f.max
	if fmin == 0 {
		fmin = min
	}
	if fmax == 0 {
		fmax = max
	}
	return min <= fmax && fmin <= max
}

// NewClient creates and validates new Client
// with given Config
func NewClient(cfg Config) (*Client, error) {
	db, err := tsdb.OpenDBReadOnly(cfg.Snapshot, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to open snapshot %q: %s", cfg.Snapshot, err)
	}
	c := &Client{DBReadOnly: db}
	min, max, err := parseTime(cfg.Filter.TimeMin, cfg.Filter.TimeMax)
	if err != nil {
		return nil, fmt.Errorf("failed to parse time in filter: %s", err)
	}
	c.filter = filter{
		min:        min,
		max:        max,
		label:      cfg.Filter.Label,
		labelValue: cfg.Filter.LabelValue,
	}
	return c, nil
}

// Explore fetches all available blocks from a snapshot
// and collects the Meta() data from each block.
// Explore does initial filtering by time-range
// for snapshot blocks but does not take into account
// label filters.
func (c *Client) Explore() ([]tsdb.BlockReader, error) {
	blocks, err := c.Blocks()
	if err != nil {
		return nil, fmt.Errorf("failed to fetch blocks: %s", err)
	}
	s := &Stats{
		Filtered: c.filter.min != 0 || c.filter.max != 0 || c.filter.label != "",
		Blocks:   len(blocks),
	}
	var blocksToImport []tsdb.BlockReader
	for _, block := range blocks {
		meta := block.Meta()
		if !c.filter.inRange(meta.MinTime, meta.MaxTime) {
			s.SkippedBlocks++
			continue
		}
		if s.MinTime == 0 || meta.MinTime < s.MinTime {
			s.MinTime = meta.MinTime
		}
		if s.MaxTime == 0 || meta.MaxTime > s.MaxTime {
			s.MaxTime = meta.MaxTime
		}
		s.Samples += meta.Stats.NumSamples
		s.Series += meta.Stats.NumSeries
		blocksToImport = append(blocksToImport, block)
	}
	fmt.Println(s)
	return blocksToImport, nil
}

// Read reads the given BlockReader according to configured
// time and label filters.
func (c *Client) Read(block tsdb.BlockReader) (storage.SeriesSet, error) {
	minTime, maxTime := block.Meta().MinTime, block.Meta().MaxTime
	if c.filter.min != 0 {
		minTime = c.filter.min
	}
	if c.filter.max != 0 {
		maxTime = c.filter.max
	}
	q, err := tsdb.NewBlockQuerier(block, minTime, maxTime)
	if err != nil {
		return nil, err
	}
	ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, c.filter.label, c.filter.labelValue))
	return ss, nil
}

func parseTime(start, end string) (int64, int64, error) {
	var s, e int64
	if start == "" && end == "" {
		return 0, 0, nil
	}
	if start != "" {
		v, err := time.Parse(time.RFC3339, start)
		if err != nil {
			return 0, 0, fmt.Errorf("failed to parse %q: %s", start, err)
		}
		s = v.UnixNano() / int64(time.Millisecond)
	}
	if end != "" {
		v, err := time.Parse(time.RFC3339, end)
		if err != nil {
			return 0, 0, fmt.Errorf("failed to parse %q: %s", end, err)
		}
		e = v.UnixNano() / int64(time.Millisecond)
	}
	return s, e, nil
}
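The exported surface of this package mirrors the influx client: NewClient opens a snapshot read-only, Explore returns the blocks that overlap the time filter, and Read narrows each block by time and label. A minimal sketch follows; the snapshot path is a placeholder assumption and error handling is abbreviated.

package main

import (
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus"
)

func main() {
	cl, err := prometheus.NewClient(prometheus.Config{
		Snapshot: "/prometheus/snapshots/20200101T000000Z-abcdef", // assumption: example snapshot dir
		Filter: prometheus.Filter{
			TimeMin: "2020-01-01T00:00:00Z",
			TimeMax: "2020-02-01T00:00:00Z",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	blocks, err := cl.Explore() // blocks overlapping the time filter
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range blocks {
		ss, err := cl.Read(b) // series set narrowed by time and label filters
		if err != nil {
			log.Fatal(err)
		}
		for ss.Next() {
			log.Println(ss.At().Labels())
		}
		if err := ss.Err(); err != nil {
			log.Fatal(err)
		}
	}
}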
34
app/vmctl/prometheus/prometheus_test.go
Normal file
@@ -0,0 +1,34 @@
package prometheus

import (
	"testing"
)

func TestInRange(t *testing.T) {
	testCases := []struct {
		filterMin, filterMax int64
		blockMin, blockMax   int64
		expected             bool
	}{
		{0, 0, 1, 2, true},
		{0, 3, 1, 2, true},
		{0, 3, 4, 5, false},
		{3, 0, 1, 2, false},
		{3, 0, 2, 4, true},
		{3, 10, 1, 2, false},
		{3, 10, 1, 4, true},
		{3, 10, 5, 9, true},
		{3, 10, 9, 12, true},
		{3, 10, 12, 15, false},
	}
	for _, tc := range testCases {
		f := filter{
			min: tc.filterMin,
			max: tc.filterMax,
		}
		got := f.inRange(tc.blockMin, tc.blockMax)
		if got != tc.expected {
			t.Fatalf("got %v; expected %v: %v", got, tc.expected, tc)
		}
	}
}
38
app/vmctl/prometheus/stats.go
Normal file
@@ -0,0 +1,38 @@
package prometheus

import (
	"fmt"
	"time"
)

// Stats represents data migration stats.
type Stats struct {
	Filtered      bool
	MinTime       int64
	MaxTime       int64
	Samples       uint64
	Series        uint64
	Blocks        int
	SkippedBlocks int
}

// String returns string representation for s.
func (s Stats) String() string {
	str := fmt.Sprintf("Prometheus snapshot stats:\n"+
		"  blocks found: %d;\n"+
		"  blocks skipped by time filter: %d;\n"+
		"  min time: %d (%v);\n"+
		"  max time: %d (%v);\n"+
		"  samples: %d;\n"+
		"  series: %d.",
		s.Blocks, s.SkippedBlocks,
		s.MinTime, time.Unix(s.MinTime/1e3, 0).Format(time.RFC3339),
		s.MaxTime, time.Unix(s.MaxTime/1e3, 0).Format(time.RFC3339),
		s.Samples, s.Series)

	if s.Filtered {
		str += "\n* Stats numbers are based on blocks meta info and don't account for applied filters."
	}

	return str
}
33
app/vmctl/utils.go
Normal file
@@ -0,0 +1,33 @@
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
)

func prompt(question string) bool {
	reader := bufio.NewReader(os.Stdin)
	fmt.Print(question, " [Y/n] ")
	answer, err := reader.ReadString('\n')
	if err != nil {
		panic(err)
	}
	answer = strings.TrimSpace(strings.ToLower(answer))
	if answer == "" || answer == "yes" || answer == "y" {
		return true
	}
	return false
}

func wrapErr(vmErr *vm.ImportError) error {
	var errTS string
	for _, ts := range vmErr.Batch {
		errTS += fmt.Sprintf("%s for timestamps range %d - %d\n",
			ts.String(), ts.Timestamps[0], ts.Timestamps[len(ts.Timestamps)-1])
	}
	return fmt.Errorf("%s with error: %s", errTS, vmErr.Err)
}
47
app/vmctl/vm/stats.go
Normal file
@@ -0,0 +1,47 @@
package vm

import (
	"fmt"
	"sync"
	"time"
)

type stats struct {
	sync.Mutex
	samples      uint64
	bytes        uint64
	requests     uint64
	retries      uint64
	startTime    time.Time
	idleDuration time.Duration
}

func (s *stats) String() string {
	s.Lock()
	defer s.Unlock()

	totalImportDuration := time.Since(s.startTime)
	totalImportDurationS := totalImportDuration.Seconds()
	var samplesPerS float64
	if s.samples > 0 && totalImportDurationS > 0 {
		samplesPerS = float64(s.samples) / totalImportDurationS
	}
	bytesPerS := byteCountSI(0)
	if s.bytes > 0 && totalImportDurationS > 0 {
		bytesPerS = byteCountSI(int64(float64(s.bytes) / totalImportDurationS))
	}

	return fmt.Sprintf("VictoriaMetrics importer stats:\n"+
		"  idle duration: %v;\n"+
		"  time spent while importing: %v;\n"+
		"  total samples: %d;\n"+
		"  samples/s: %.2f;\n"+
		"  total bytes: %s;\n"+
		"  bytes/s: %s;\n"+
		"  import requests: %d;\n"+
		"  import requests retries: %d;",
		s.idleDuration, totalImportDuration,
		s.samples, samplesPerS,
		byteCountSI(int64(s.bytes)), bytesPerS,
		s.requests, s.retries)
}
82
app/vmctl/vm/timeseries.go
Normal file
@@ -0,0 +1,82 @@
package vm

import (
	"fmt"
	"io"
)

// TimeSeries represents a time series.
type TimeSeries struct {
	Name       string
	LabelPairs []LabelPair
	Timestamps []int64
	Values     []float64
}

// LabelPair represents a label
type LabelPair struct {
	Name  string
	Value string
}

// String returns user-readable ts.
func (ts TimeSeries) String() string {
	s := ts.Name
	if len(ts.LabelPairs) < 1 {
		return s
	}
	var labels string
	for i, lp := range ts.LabelPairs {
		labels += fmt.Sprintf("%s=%q", lp.Name, lp.Value)
		if i < len(ts.LabelPairs)-1 {
			labels += ","
		}
	}
	return fmt.Sprintf("%s{%s}", s, labels)
}

// cWriter used to avoid error checking
// while doing Write calls.
// cWriter caches the first error if any
// and discards all sequential write calls
type cWriter struct {
	w   io.Writer
	n   int
	err error
}

func (cw *cWriter) printf(format string, args ...interface{}) {
	if cw.err != nil {
		return
	}
	n, err := fmt.Fprintf(cw.w, format, args...)
	cw.n += n
	cw.err = err
}

// write serializes ts into the JSON line format expected by the import API, e.g.:
// {"metric":{"__name__":"cpu_usage_guest","arch":"x64","hostname":"host_19"},"timestamps":[1567296000000,1567296010000],"values":[1567296000000,66]}
func (ts *TimeSeries) write(w io.Writer) (int, error) {
	pointsCount := len(ts.Timestamps)
	if pointsCount == 0 {
		return 0, nil
	}

	cw := &cWriter{w: w}
	cw.printf(`{"metric":{"__name__":%q`, ts.Name)
	if len(ts.LabelPairs) > 0 {
		for _, lp := range ts.LabelPairs {
			cw.printf(",%q:%q", lp.Name, lp.Value)
		}
	}

	cw.printf(`},"timestamps":[`)
	for i := 0; i < pointsCount-1; i++ {
		cw.printf(`%d,`, ts.Timestamps[i])
	}
	cw.printf(`%d],"values":[`, ts.Timestamps[pointsCount-1])
	for i := 0; i < pointsCount-1; i++ {
		cw.printf(`%v,`, ts.Values[i])
	}
	cw.printf("%v]}\n", ts.Values[pointsCount-1])
	return cw.n, cw.err
}
89
app/vmctl/vm/timeseries_test.go
Normal file
@@ -0,0 +1,89 @@
package vm

import (
	"bytes"
	"math"
	"strings"
	"testing"
)

func TestTimeSeries_Write(t *testing.T) {
	var testCases = []struct {
		name string
		ts   *TimeSeries
		exp  string
	}{
		{
			name: "one datapoint",
			ts: &TimeSeries{
				Name: "foo",
				LabelPairs: []LabelPair{
					{
						Name:  "key",
						Value: "val",
					},
				},
				Timestamps: []int64{1577877162200},
				Values:     []float64{1},
			},
			exp: `{"metric":{"__name__":"foo","key":"val"},"timestamps":[1577877162200],"values":[1]}`,
		},
		{
			name: "multiple samples",
			ts: &TimeSeries{
				Name: "foo",
				LabelPairs: []LabelPair{
					{
						Name:  "key",
						Value: "val",
					},
				},
				Timestamps: []int64{1577877162200, 15778771622400, 15778771622600},
				Values:     []float64{1, 1.6263, 32.123},
			},
			exp: `{"metric":{"__name__":"foo","key":"val"},"timestamps":[1577877162200,15778771622400,15778771622600],"values":[1,1.6263,32.123]}`,
		},
		{
			name: "no samples",
			ts: &TimeSeries{
				Name: "foo",
				LabelPairs: []LabelPair{
					{
						Name:  "key",
						Value: "val",
					},
				},
			},
			exp: ``,
		},
		{
			name: "inf values",
			ts: &TimeSeries{
				Name: "foo",
				LabelPairs: []LabelPair{
					{
						Name:  "key",
						Value: "val",
					},
				},
				Timestamps: []int64{1577877162200, 1577877162200, 1577877162200},
				Values:     []float64{0, math.Inf(-1), math.Inf(1)},
			},
			exp: `{"metric":{"__name__":"foo","key":"val"},"timestamps":[1577877162200,1577877162200,1577877162200],"values":[0,-Inf,+Inf]}`,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			b := &bytes.Buffer{}
			_, err := tc.ts.write(b)
			if err != nil {
				t.Error(err)
			}
			got := strings.TrimSpace(b.String())
			if got != tc.exp {
				t.Fatalf("\ngot: %q\nwant: %q", got, tc.exp)
			}
		})
	}
}
384
app/vmctl/vm/vm.go
Normal file
@@ -0,0 +1,384 @@
package vm

import (
	"bufio"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"net/http"
	"strings"
	"sync"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
)

// Config contains list of params to configure
// the Importer
type Config struct {
	// VictoriaMetrics address to perform import requests
	// --httpListenAddr value for single node version
	// --httpListenAddr value of VMSelect component for cluster version
	Addr string
	// Concurrency defines number of workers
	// performing the import requests concurrently
	Concurrency uint8
	// Whether to apply gzip compression
	Compress bool
	// AccountID for cluster version.
	// Empty value assumes it is a single node version
	AccountID string
	// BatchSize defines how many samples
	// importer collects before sending the import request
	BatchSize int
	// User name for basic auth
	User string
	// Password for basic auth
	Password string
	// SignificantFigures defines the number of significant figures to leave
	// in metric values before importing.
	// Zero value saves all the significant decimal places
	SignificantFigures int
	// RoundDigits defines the number of decimal digits after the point that must be left
	// in metric values before importing.
	RoundDigits int
	// ExtraLabels that will be added to all imported series. Must be in label=value format.
	ExtraLabels []string
}

// Importer performs insertion of timeseries
// via VictoriaMetrics import protocol
// see https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master#how-to-import-time-series-data
type Importer struct {
	addr       string
	importPath string
	compress   bool
	user       string
	password   string

	close  chan struct{}
	input  chan *TimeSeries
	errors chan *ImportError

	wg   sync.WaitGroup
	once sync.Once

	s *stats
}

// ResetStats resets im stats.
func (im *Importer) ResetStats() {
	im.s = &stats{
		startTime: time.Now(),
	}
}

// Stats returns im stats.
func (im *Importer) Stats() string {
	return im.s.String()
}

// AddExtraLabelsToImportPath adds extra label query params to the given url path.
func AddExtraLabelsToImportPath(path string, extraLabels []string) (string, error) {
	dst := path
	separator := "?"
	for _, extraLabel := range extraLabels {
		if !strings.Contains(extraLabel, "=") {
			return path, fmt.Errorf("bad format for extra_label flag, it must be `key=value`, got: %q", extraLabel)
		}
		if strings.Contains(dst, "?") {
			separator = "&"
		}
		dst += fmt.Sprintf("%sextra_label=%s", separator, extraLabel)
	}
	return dst, nil
}

// NewImporter creates new Importer for the given cfg.
func NewImporter(cfg Config) (*Importer, error) {
	if cfg.Concurrency < 1 {
		return nil, fmt.Errorf("concurrency can't be lower than 1")
	}

	addr := strings.TrimRight(cfg.Addr, "/")
	// if single version
	// see https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master#how-to-import-time-series-data
	importPath := addr + "/api/v1/import"
	if cfg.AccountID != "" {
		// if cluster version
		// see https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster#url-format
		importPath = fmt.Sprintf("%s/insert/%s/prometheus/api/v1/import", addr, cfg.AccountID)
	}
	importPath, err := AddExtraLabelsToImportPath(importPath, cfg.ExtraLabels)
	if err != nil {
		return nil, err
	}

	im := &Importer{
		addr:       addr,
		importPath: importPath,
		compress:   cfg.Compress,
		user:       cfg.User,
		password:   cfg.Password,
		close:      make(chan struct{}),
		input:      make(chan *TimeSeries, cfg.Concurrency*4),
		errors:     make(chan *ImportError, cfg.Concurrency),
	}
	if err := im.Ping(); err != nil {
		return nil, fmt.Errorf("ping to %q failed: %s", addr, err)
	}

	if cfg.BatchSize < 1 {
		cfg.BatchSize = 1e5
	}

	im.wg.Add(int(cfg.Concurrency))
	for i := 0; i < int(cfg.Concurrency); i++ {
		go func() {
			defer im.wg.Done()
			im.startWorker(cfg.BatchSize, cfg.SignificantFigures, cfg.RoundDigits)
		}()
	}
	im.ResetStats()
	return im, nil
}

// ImportError is type of error generated
// in case of unsuccessful import request
type ImportError struct {
	// The batch of timeseries that failed
	Batch []*TimeSeries
	// The error that appeared during insert
	Err error
}

// Errors returns a channel for receiving
// import errors if any
func (im *Importer) Errors() chan *ImportError { return im.errors }

// Input returns a channel for sending timeseries
// that need to be imported
func (im *Importer) Input() chan<- *TimeSeries { return im.input }

// Close sends signal to all goroutines to exit
// and waits until they are finished
func (im *Importer) Close() {
	im.once.Do(func() {
		close(im.close)
		im.wg.Wait()
		close(im.errors)
	})
}

func (im *Importer) startWorker(batchSize, significantFigures, roundDigits int) {
	var batch []*TimeSeries
	var dataPoints int
	var waitForBatch time.Time
	for {
		select {
		case <-im.close:
			if err := im.Import(batch); err != nil {
				im.errors <- &ImportError{
					Batch: batch,
					Err:   err,
				}
			}
			return
		case ts := <-im.input:
			// init waitForBatch when the first
			// value is received
			if waitForBatch.IsZero() {
				waitForBatch = time.Now()
			}

			if significantFigures > 0 {
				for i, v := range ts.Values {
					ts.Values[i] = decimal.RoundToSignificantFigures(v, significantFigures)
				}
			}
			if roundDigits < 100 {
				for i, v := range ts.Values {
					ts.Values[i] = decimal.RoundToDecimalDigits(v, roundDigits)
				}
			}

			batch = append(batch, ts)
			dataPoints += len(ts.Values)
			if dataPoints < batchSize {
				continue
			}
			im.s.Lock()
			im.s.idleDuration += time.Since(waitForBatch)
			im.s.Unlock()

			if err := im.flush(batch); err != nil {
				im.errors <- &ImportError{
					Batch: batch,
					Err:   err,
				}
				// make a new batch, since the old one is referenced in the error
				batch = make([]*TimeSeries, len(batch))
			}
			batch = batch[:0]
			dataPoints = 0
			waitForBatch = time.Now()
		}
	}
}

const (
	// TODO: make configurable
	backoffRetries     = 5
	backoffFactor      = 1.7
	backoffMinDuration = time.Second
)

func (im *Importer) flush(b []*TimeSeries) error {
	var err error
	for i := 0; i < backoffRetries; i++ {
		err = im.Import(b)
		if err == nil {
			return nil
		}
		if errors.Is(err, ErrBadRequest) {
			return err // fail fast if not recoverable
		}
		im.s.Lock()
		im.s.retries++
		im.s.Unlock()
		backoff := float64(backoffMinDuration) * math.Pow(backoffFactor, float64(i))
		time.Sleep(time.Duration(backoff))
	}
	return fmt.Errorf("import failed with %d retries: %s", backoffRetries, err)
}

// Ping sends a ping to im.addr.
func (im *Importer) Ping() error {
	url := fmt.Sprintf("%s/health", im.addr)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return fmt.Errorf("cannot create request to %q: %s", im.addr, err)
	}
	if im.user != "" {
		req.SetBasicAuth(im.user, im.password)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("bad status code: %d", resp.StatusCode)
	}
	return nil
}

// Import imports tsBatch.
func (im *Importer) Import(tsBatch []*TimeSeries) error {
	if len(tsBatch) < 1 {
		return nil
	}

	pr, pw := io.Pipe()
	req, err := http.NewRequest("POST", im.importPath, pr)
	if err != nil {
		return fmt.Errorf("cannot create request to %q: %s", im.addr, err)
	}
	if im.user != "" {
		req.SetBasicAuth(im.user, im.password)
	}
	if im.compress {
		req.Header.Set("Content-Encoding", "gzip")
	}

	errCh := make(chan error)
	go func() {
		errCh <- do(req)
		close(errCh)
	}()

	w := io.Writer(pw)
	if im.compress {
		zw, err := gzip.NewWriterLevel(pw, 1)
		if err != nil {
			return fmt.Errorf("unexpected error when creating gzip writer: %s", err)
		}
		w = zw
	}
	bw := bufio.NewWriterSize(w, 16*1024)

	var totalSamples, totalBytes int
	for _, ts := range tsBatch {
		n, err := ts.write(bw)
		if err != nil {
			return fmt.Errorf("write err: %w", err)
		}
		totalBytes += n
		totalSamples += len(ts.Values)
	}
	if err := bw.Flush(); err != nil {
		return err
	}
	if im.compress {
		err := w.(*gzip.Writer).Close()
		if err != nil {
			return err
		}
	}
	if err := pw.Close(); err != nil {
		return err
	}

	requestErr := <-errCh
	if requestErr != nil {
		return fmt.Errorf("import request error for %q: %w", im.addr, requestErr)
	}

	im.s.Lock()
	im.s.bytes += uint64(totalBytes)
	im.s.samples += uint64(totalSamples)
	im.s.requests++
	im.s.Unlock()

	return nil
}

// ErrBadRequest represents bad request error.
var ErrBadRequest = errors.New("bad request")

func do(req *http.Request) error {
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("unexpected error when performing request: %s", err)
	}
	defer func() {
		_ = resp.Body.Close()
	}()
	if resp.StatusCode != http.StatusNoContent {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return fmt.Errorf("failed to read response body for status code %d: %s", resp.StatusCode, err)
		}
		if resp.StatusCode == http.StatusBadRequest {
			return fmt.Errorf("%w: unexpected response code %d: %s", ErrBadRequest, resp.StatusCode, string(body))
		}
		return fmt.Errorf("unexpected response code %d: %s", resp.StatusCode, string(body))
	}
	return nil
}

func byteCountSI(b int64) string {
	const unit = 1000
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div, exp := int64(unit), 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f %cB",
		float64(b)/float64(div), "kMGTPE"[exp])
}
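For reference, the constants above yield the retry schedule 1s, 1.7s, 2.89s, 4.91s, 8.35s (backoffMinDuration * backoffFactor^i for i = 0..4). The sketch below wires an Importer end to end; the VictoriaMetrics address is a placeholder assumption (8428 is the default single-node port), the single series is synthetic, and note that a consumer should drain Errors() so workers never block on a full errors channel.

package main

import (
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
)

func main() {
	im, err := vm.NewImporter(vm.Config{
		Addr:        "http://localhost:8428", // assumption: local single-node VictoriaMetrics
		Concurrency: 1,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Consume import errors so the workers never block on the errors channel.
	done := make(chan struct{})
	go func() {
		for e := range im.Errors() {
			log.Printf("import error: %s", e.Err)
		}
		close(done)
	}()

	im.Input() <- &vm.TimeSeries{
		Name:       "demo_metric",
		LabelPairs: []vm.LabelPair{{Name: "job", Value: "vmctl-sketch"}},
		Timestamps: []int64{1577836800000},
		Values:     []float64{42},
	}

	im.Close() // flush the pending batch and close the errors channel
	<-done
	log.Print(im.Stats())
}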
69
app/vmctl/vm/vm_test.go
Normal file
@@ -0,0 +1,69 @@
package vm
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestAddExtraLabelsToImportPath(t *testing.T) {
|
||||
type args struct {
|
||||
path string
|
||||
extraLabels []string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "ok w/o extra labels",
|
||||
args: args{
|
||||
path: "/api/v1/import",
|
||||
},
|
||||
want: "/api/v1/import",
|
||||
},
|
||||
{
|
||||
name: "ok one extra label",
|
||||
args: args{
|
||||
path: "/api/v1/import",
|
||||
extraLabels: []string{"instance=host-1"},
|
||||
},
|
||||
want: "/api/v1/import?extra_label=instance=host-1",
|
||||
},
|
||||
{
|
||||
name: "ok two extra labels",
|
||||
args: args{
|
||||
path: "/api/v1/import",
|
||||
extraLabels: []string{"instance=host-2", "job=vmagent"},
|
||||
},
|
||||
want: "/api/v1/import?extra_label=instance=host-2&extra_label=job=vmagent",
|
||||
},
|
||||
{
|
||||
name: "ok two extra with exist param",
|
||||
args: args{
|
||||
path: "/api/v1/import?timeout=50",
|
||||
extraLabels: []string{"instance=host-2", "job=vmagent"},
|
||||
},
|
||||
want: "/api/v1/import?timeout=50&extra_label=instance=host-2&extra_label=job=vmagent",
|
||||
},
|
||||
{
|
||||
name: "bad incorrect format for extra label",
|
||||
args: args{
|
||||
path: "/api/v1/import",
|
||||
extraLabels: []string{"label=value", "bad_label_wo_value"},
|
||||
},
|
||||
want: "/api/v1/import",
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := AddExtraLabelsToImportPath(tt.args.path, tt.args.extraLabels)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("AddExtraLabelsToImportPath() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if got != tt.want {
|
||||
t.Errorf("AddExtraLabelsToImportPath() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
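For orientation, a minimal sketch of what AddExtraLabelsToImportPath plausibly does, reconstructed only from the test cases above (the real implementation lives in app/vmctl/vm and may differ; assumes fmt and strings are imported):

	// Sketch, not the actual implementation: append extra_label query args
	// to an import path, validating the name=value format.
	func addExtraLabelsToImportPathSketch(path string, extraLabels []string) (string, error) {
		dst := path
		sep := "?"
		if strings.Contains(path, "?") {
			sep = "&"
		}
		for _, label := range extraLabels {
			if !strings.Contains(label, "=") {
				return path, fmt.Errorf("extra label %q must have the format name=value", label)
			}
			dst += sep + "extra_label=" + label
			sep = "&"
		}
		return dst, nil
	}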
143
app/vmctl/vm_native.go
Normal file
@ -0,0 +1,143 @@
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
	"github.com/cheggaaa/pb/v3"
)

type vmNativeProcessor struct {
	filter filter

	dst *vmNativeClient
	src *vmNativeClient
}

type vmNativeClient struct {
	addr        string
	user        string
	password    string
	extraLabels []string
}

type filter struct {
	match     string
	timeStart string
	timeEnd   string
}

func (f filter) String() string {
	s := fmt.Sprintf("\n\tfilter: match[]=%s", f.match)
	if f.timeStart != "" {
		s += fmt.Sprintf("\n\tstart: %s", f.timeStart)
	}
	if f.timeEnd != "" {
		s += fmt.Sprintf("\n\tend: %s", f.timeEnd)
	}
	return s
}

const (
	nativeExportAddr = "api/v1/export/native"
	nativeImportAddr = "api/v1/import/native"

	barTpl = `Total: {{counters . }} {{ cycle . "↖" "↗" "↘" "↙" }} Speed: {{speed . }} {{string . "suffix"}}`
)

func (p *vmNativeProcessor) run() error {
	pr, pw := io.Pipe()

	fmt.Printf("Initing export pipe from %q with filters: %s\n", p.src.addr, p.filter)
	exportReader, err := p.exportPipe()
	if err != nil {
		return fmt.Errorf("failed to init export pipe: %s", err)
	}

	sync := make(chan struct{})
	nativeImportAddr, err := vm.AddExtraLabelsToImportPath(nativeImportAddr, p.dst.extraLabels)
	if err != nil {
		return err
	}

	go func() {
		defer func() { close(sync) }()
		u := fmt.Sprintf("%s/%s", p.dst.addr, nativeImportAddr)
		req, err := http.NewRequest("POST", u, pr)
		if err != nil {
			log.Fatalf("cannot create import request to %q: %s", p.dst.addr, err)
		}
		importResp, err := p.dst.do(req, http.StatusNoContent)
		if err != nil {
			log.Fatalf("import request failed: %s", err)
		}
		if err := importResp.Body.Close(); err != nil {
			log.Fatalf("cannot close import response body: %s", err)
		}
	}()

	fmt.Printf("Initing import process to %q:\n", p.dst.addr)
	bar := pb.ProgressBarTemplate(barTpl).Start64(0)
	barReader := bar.NewProxyReader(exportReader)

	_, err = io.Copy(pw, barReader)
	if err != nil {
		return fmt.Errorf("failed to write into %q: %s", p.dst.addr, err)
	}
	if err := pw.Close(); err != nil {
		return err
	}
	<-sync

	bar.Finish()
	return nil
}
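run streams the export response straight into the import request through io.Pipe, so the dataset is never buffered whole in memory. A hedged standalone sketch of that pattern (hypothetical URLs; auth and progress bar omitted):

	package main

	import (
		"io"
		"log"
		"net/http"
	)

	// streamCopy pipes the body of a GET response directly into a POST request.
	// Data flows through io.Pipe chunk by chunk instead of being buffered whole.
	func streamCopy(srcURL, dstURL string) error {
		src, err := http.Get(srcURL) // hypothetical source endpoint
		if err != nil {
			return err
		}
		defer func() { _ = src.Body.Close() }()

		pr, pw := io.Pipe()
		errCh := make(chan error, 1)
		go func() {
			// The POST consumes pr while the main goroutine fills pw below.
			resp, err := http.Post(dstURL, "application/octet-stream", pr)
			if err != nil {
				errCh <- err
				return
			}
			errCh <- resp.Body.Close()
		}()

		if _, err := io.Copy(pw, src.Body); err != nil {
			_ = pw.CloseWithError(err)
			return err
		}
		_ = pw.Close()
		return <-errCh
	}

	func main() {
		if err := streamCopy("http://src:8428/api/v1/export/native", "http://dst:8428/api/v1/import/native"); err != nil {
			log.Fatalf("stream copy failed: %s", err)
		}
	}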

func (p *vmNativeProcessor) exportPipe() (io.ReadCloser, error) {
	u := fmt.Sprintf("%s/%s", p.src.addr, nativeExportAddr)
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, fmt.Errorf("cannot create request to %q: %s", p.src.addr, err)
	}

	params := req.URL.Query()
	params.Set("match[]", p.filter.match)
	if p.filter.timeStart != "" {
		params.Set("start", p.filter.timeStart)
	}
	if p.filter.timeEnd != "" {
		params.Set("end", p.filter.timeEnd)
	}
	req.URL.RawQuery = params.Encode()

	// disable compression since it is meaningless for native format
	req.Header.Set("Accept-Encoding", "identity")
	resp, err := p.src.do(req, http.StatusOK)
	if err != nil {
		return nil, fmt.Errorf("export request failed: %s", err)
	}
	return resp.Body, nil
}

func (c *vmNativeClient) do(req *http.Request, expSC int) (*http.Response, error) {
	if c.user != "" {
		req.SetBasicAuth(c.user, c.password)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("unexpected error when performing request: %s", err)
	}

	if resp.StatusCode != expSC {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return nil, fmt.Errorf("failed to read response body for status code %d: %s", resp.StatusCode, err)
		}
		return nil, fmt.Errorf("unexpected response code %d: %s", resp.StatusCode, string(body))
	}
	return resp, err
}
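End to end, the processor above backs vmctl's native migration mode. A hedged invocation example (flag names as documented for vmctl's vm-native mode; verify them against your vmctl version):

	./vmctl vm-native \
		--vm-native-src-addr=http://source-vm:8428 \
		--vm-native-dst-addr=http://dest-vm:8428 \
		--vm-native-filter-match='{__name__!=""}' \
		--vm-native-filter-time-start='2021-02-01T00:00:00Z'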
app/vmrestore/README.md
@ -7,7 +7,7 @@ Restore process can be interrupted at any time. It is automatically resumed from
when restarting `vmrestore` with the same args.

### Usage
## Usage

VictoriaMetrics must be stopped during the restore process.

@ -25,13 +25,13 @@ The original `-storageDataPath` directory may contain old files. They will be su
i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/questions/476041/how-do-i-make-rsync-delete-files-that-have-been-deleted-from-the-source-folder).

### Troubleshooting
## Troubleshooting

* If `vmrestore` eats all the network bandwidth, then set `-maxBytesPerSecond` to the desired value.
* If `vmrestore` has been interrupted due to temporary error, then just restart it with the same args. It will resume the restore process.

### Advanced usage
## Advanced usage

* Obtaining credentials from a file.

@ -118,24 +118,24 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q
```

### How to build from sources
## How to build from sources

It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - see `vmutils-*` archives there.

#### Development build
### Development build

1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
2. Run `make vmrestore` from the root folder of the repository.
   It builds `vmrestore` binary and puts it into the `bin` folder.

#### Production build
### Production build

1. [Install docker](https://docs.docker.com/install/).
2. Run `make vmrestore-prod` from the root folder of the repository.
   It builds `vmrestore-prod` binary and puts it into the `bin` folder.

#### Building docker images
### Building docker images

Run `make package-vmrestore`. It builds `victoriametrics/vmrestore:<PKG_TAG>` docker image locally.
`<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository.
417
app/vmselect/graphiteql/lexer.go
Normal file
@ -0,0 +1,417 @@
package graphiteql

import (
	"fmt"
	"strings"
)

type lexer struct {
	// Token contains the currently parsed token.
	// An empty token means EOF.
	Token string

	sOrig string
	sTail string

	err error
}

func (lex *lexer) Context() string {
	return fmt.Sprintf("%s%s", lex.Token, lex.sTail)
}

func (lex *lexer) Init(s string) {
	lex.Token = ""

	lex.sOrig = s
	lex.sTail = s

	lex.err = nil
}

func (lex *lexer) Next() error {
	if lex.err != nil {
		return lex.err
	}
	token, err := lex.next()
	if err != nil {
		lex.err = err
		return err
	}
	lex.Token = token
	return nil
}

func (lex *lexer) next() (string, error) {
	// Skip whitespace
	s := lex.sTail
	i := 0
	for i < len(s) && isSpaceChar(s[i]) {
		i++
	}
	s = s[i:]
	lex.sTail = s

	if len(s) == 0 {
		return "", nil
	}

	var token string
	var err error
	switch s[0] {
	case '(', ')', ',', '|', '=', '+', '-':
		token = s[:1]
		goto tokenFoundLabel
	}
	if isStringPrefix(s) {
		token, err = scanString(s)
		if err != nil {
			return "", err
		}
		goto tokenFoundLabel
	}
	if isPositiveNumberPrefix(s) {
		token, err = scanPositiveNumber(s)
		if err != nil {
			return "", err
		}
		goto tokenFoundLabel
	}
	token, err = scanIdent(s)
	if err != nil {
		return "", err
	}

tokenFoundLabel:
	lex.sTail = s[len(token):]
	return token, nil
}
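A hedged usage sketch of the lexer, mirroring how the tests below drive it (the type is unexported, so this runs only inside the package; assumes fmt and log are imported):

	// Tokenize a target expression and print each token.
	var lex lexer
	lex.Init(`foo.Bar|aaa(b,cd)`)
	for {
		if err := lex.Next(); err != nil {
			log.Fatalf("lex error: %s", err)
		}
		if isEOF(lex.Token) {
			break
		}
		fmt.Printf("%q ", lex.Token)
	}
	// Output (per TestLexerSuccess below): "foo.Bar" "|" "aaa" "(" "b" "," "cd" ")"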

func scanString(s string) (string, error) {
	if len(s) < 2 {
		return "", fmt.Errorf("cannot find end of string in %q", s)
	}

	quote := s[0]
	i := 1
	for {
		n := strings.IndexByte(s[i:], quote)
		if n < 0 {
			return "", fmt.Errorf("cannot find closing quote %c for the string %q", quote, s)
		}
		i += n
		bs := 0
		for bs < i && s[i-bs-1] == '\\' {
			bs++
		}
		if bs%2 == 0 {
			token := s[:i+1]
			return token, nil
		}
		i++
	}
}

func scanPositiveNumber(s string) (string, error) {
	// Scan integer part. It may be empty if fractional part exists.
	i := 0
	skipChars, isHex := scanSpecialIntegerPrefix(s)
	i += skipChars
	if isHex {
		// Scan integer hex number
		for i < len(s) && isHexChar(s[i]) {
			i++
		}
		if i == skipChars {
			return "", fmt.Errorf("number cannot be empty")
		}
		return s[:i], nil
	}
	for i < len(s) && isDecimalChar(s[i]) {
		i++
	}

	if i == len(s) {
		if i == skipChars {
			return "", fmt.Errorf("number cannot be empty")
		}
		return s, nil
	}
	if s[i] != '.' && s[i] != 'e' && s[i] != 'E' {
		return s[:i], nil
	}

	if s[i] == '.' {
		// Scan fractional part. It cannot be empty.
		i++
		j := i
		for j < len(s) && isDecimalChar(s[j]) {
			j++
		}
		if j == i {
			return "", fmt.Errorf("missing fractional part in %q", s)
		}
		i = j
		if i == len(s) {
			return s, nil
		}
	}

	if s[i] != 'e' && s[i] != 'E' {
		return s[:i], nil
	}
	i++

	// Scan exponent part.
	if i == len(s) {
		return "", fmt.Errorf("missing exponent part in %q", s)
	}
	if s[i] == '-' || s[i] == '+' {
		i++
	}
	j := i
	for j < len(s) && isDecimalChar(s[j]) {
		j++
	}
	if j == i {
		return "", fmt.Errorf("missing exponent part in %q", s)
	}
	return s[:j], nil
}
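Per the tests later in this diff, scanPositiveNumber therefore accepts plain integers (123), prefixed integer forms (0734, 0xABcd, 0o765, 0b1101), and floats with fractional and exponent parts (12.34, .54e6, 12.34e-4). A sign is never part of the number token, which is why the lexer emits `+` and `12.34` as two separate tokens for `+12.34`.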

func scanIdent(s string) (string, error) {
	i := 0
	for i < len(s) {
		switch s[i] {
		case '\\':
			// Skip the next char, since it is escaped
			i += 2
			if i > len(s) {
				return "", fmt.Errorf("missing escaped char in the end of %q", s)
			}
		case '[':
			n := strings.IndexByte(s[i+1:], ']')
			if n < 0 {
				return "", fmt.Errorf("missing `]` char in %q", s)
			}
			i += n + 2
		case '{':
			n := strings.IndexByte(s[i+1:], '}')
			if n < 0 {
				return "", fmt.Errorf("missing '}' char in %q", s)
			}
			i += n + 2
		case '*', '.':
			i++
		default:
			if !isIdentChar(s[i]) {
				goto end
			}
			i++
		}
	}
end:
	if i == 0 {
		return "", fmt.Errorf("cannot find a single ident char in %q", s)
	}
	return s[:i], nil
}

func unescapeIdent(s string) string {
	n := strings.IndexByte(s, '\\')
	if n < 0 {
		return s
	}
	dst := make([]byte, 0, len(s))
	for {
		dst = append(dst, s[:n]...)
		s = s[n+1:]
		if len(s) == 0 {
			return string(dst)
		}
		if s[0] == 'x' && len(s) >= 3 {
			h1 := fromHex(s[1])
			h2 := fromHex(s[2])
			if h1 >= 0 && h2 >= 0 {
				dst = append(dst, byte((h1<<4)|h2))
				s = s[3:]
			} else {
				dst = append(dst, s[0])
				s = s[1:]
			}
		} else {
			dst = append(dst, s[0])
			s = s[1:]
		}
		n = strings.IndexByte(s, '\\')
		if n < 0 {
			dst = append(dst, s...)
			return string(dst)
		}
	}
}

func fromHex(ch byte) int {
	if ch >= '0' && ch <= '9' {
		return int(ch - '0')
	}
	if ch >= 'a' && ch <= 'f' {
		return int((ch - 'a') + 10)
	}
	if ch >= 'A' && ch <= 'F' {
		return int((ch - 'A') + 10)
	}
	return -1
}

func toHex(n byte) byte {
	if n < 10 {
		return '0' + n
	}
	return 'a' + (n - 10)
}

func isMetricExprChar(ch byte) bool {
	switch ch {
	case '.', '*', '[', ']', '{', '}', ',':
		return true
	}
	return false
}

func appendEscapedIdent(dst []byte, s string) []byte {
	for i := 0; i < len(s); i++ {
		ch := s[i]
		if isIdentChar(ch) || isMetricExprChar(ch) {
			if i == 0 && !isFirstIdentChar(ch) {
				// hex-encode the first char
				dst = append(dst, '\\', 'x', toHex(ch>>4), toHex(ch&0xf))
			} else {
				dst = append(dst, ch)
			}
		} else if ch >= 0x20 && ch < 0x7f {
			// Escape other ASCII printable chars with a backslash
			dst = append(dst, '\\', ch)
		} else {
			// hex-encode non-printable chars
			dst = append(dst, '\\', 'x', toHex(ch>>4), toHex(ch&0xf))
		}
	}
	return dst
}

func isEOF(s string) bool {
	return len(s) == 0
}

func isBool(s string) bool {
	s = strings.ToLower(s)
	return s == "true" || s == "false"
}

func isStringPrefix(s string) bool {
	if len(s) == 0 {
		return false
	}
	switch s[0] {
	case '"', '\'':
		return true
	default:
		return false
	}
}

func isPositiveNumberPrefix(s string) bool {
	if len(s) == 0 {
		return false
	}
	if isDecimalChar(s[0]) {
		return true
	}

	// Check for .234 numbers
	if s[0] != '.' || len(s) < 2 {
		return false
	}
	return isDecimalChar(s[1])
}

func isSpecialIntegerPrefix(s string) bool {
	skipChars, _ := scanSpecialIntegerPrefix(s)
	return skipChars > 0
}

func scanSpecialIntegerPrefix(s string) (skipChars int, isHex bool) {
	if len(s) < 1 || s[0] != '0' {
		return 0, false
	}
	s = strings.ToLower(s[1:])
	if len(s) == 0 {
		return 0, false
	}
	if isDecimalChar(s[0]) {
		// octal number: 0123
		return 1, false
	}
	if s[0] == 'x' {
		// 0x prefix
		return 2, true
	}
	if s[0] == 'o' || s[0] == 'b' {
		// 0o or 0b prefix
		return 2, false
	}
	return 0, false
}

func isDecimalChar(ch byte) bool {
	return ch >= '0' && ch <= '9'
}

func isHexChar(ch byte) bool {
	return isDecimalChar(ch) || ch >= 'a' && ch <= 'f' || ch >= 'A' && ch <= 'F'
}

func isIdentPrefix(s string) bool {
	if len(s) == 0 {
		return false
	}
	if s[0] == '\\' {
		// Assume this is an escape char for the next char.
		return true
	}
	return isFirstIdentChar(s[0])
}

func isFirstIdentChar(ch byte) bool {
	if !isIdentChar(ch) {
		return false
	}
	if isDecimalChar(ch) {
		return false
	}
	return true
}

func isIdentChar(ch byte) bool {
	if ch >= 'a' && ch <= 'z' || ch >= 'A' && ch <= 'Z' {
		return true
	}
	if isDecimalChar(ch) {
		return true
	}
	switch ch {
	case '-', '_', '$', ':', '*', '{', '[':
		return true
	}
	return false
}

func isSpaceChar(ch byte) bool {
	switch ch {
	case ' ', '\t', '\n', '\v', '\f', '\r':
		return true
	default:
		return false
	}
}
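A hedged round-trip illustration of the two escaping helpers above (expected outputs taken from the tests in this diff):

	fmt.Printf("%s\n", appendEscapedIdent(nil, "a(b =C)")) // a\(b\ \=C\)
	// Round trip: unescape then re-escape normalizes hex escapes to lowercase.
	fmt.Printf("%s\n", appendEscapedIdent(nil, unescapeIdent(`f\x1B\x3a`))) // f\x1b: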
151
app/vmselect/graphiteql/lexer_test.go
Normal file
@ -0,0 +1,151 @@
package graphiteql

import (
	"reflect"
	"strings"
	"testing"
)

func TestScanStringSuccess(t *testing.T) {
	f := func(s, sExpected string) {
		t.Helper()
		result, err := scanString(s)
		if err != nil {
			t.Fatalf("unexpected error in scanString(%s): %s", s, err)
		}
		if result != sExpected {
			t.Fatalf("unexpected string scanned from %s; got %s; want %s", s, result, sExpected)
		}
		if !strings.HasPrefix(s, result) {
			t.Fatalf("invalid prefix for scanned string %s: %s", s, result)
		}
	}
	f(`""`, `""`)
	f(`''`, `''`)
	f(`""tail`, `""`)
	f(`''tail`, `''`)
	f(`"foo", bar`, `"foo"`)
	f(`'foo', bar`, `'foo'`)
	f(`"foo\.bar"`, `"foo\.bar"`)
	f(`"foo\"bar\1"\"`, `"foo\"bar\1"`)
	f(`"foo\\"bar\1"\"`, `"foo\\"`)
	f(`'foo\\'bar\1"\"`, `'foo\\'`)
}

func TestScanStringFailure(t *testing.T) {
	f := func(s string) {
		t.Helper()
		result, err := scanString(s)
		if err == nil {
			t.Fatalf("expecting non-nil error for scanString(%s)", s)
		}
		if result != "" {
			t.Fatalf("expecting empty result for scanString(%s); got %s", s, result)
		}
	}
	f(``)
	f(`"foo`)
	f(`'bar`)
}

func TestAppendEscapedIdent(t *testing.T) {
	f := func(s, resultExpected string) {
		t.Helper()
		result := appendEscapedIdent(nil, s)
		if string(result) != resultExpected {
			t.Fatalf("unexpected result; got\n%s\nwant\n%s", result, resultExpected)
		}
	}
	f("", "")
	f("a", "a")
	f("fo_o.$b-ar.b[a]*z{aa,bb}", "fo_o.$b-ar.b[a]*z{aa,bb}")
	f("a(b =C)", `a\(b\ \=C\)`)
}

func TestLexerSuccess(t *testing.T) {
	f := func(s string, tokensExpected []string) {
		t.Helper()
		var lex lexer
		var tokens []string
		lex.Init(s)
		for {
			if err := lex.Next(); err != nil {
				t.Fatalf("unexpected error: %s", err)
			}
			if isEOF(lex.Token) {
				break
			}
			tokens = append(tokens, lex.Token)
		}
		if !reflect.DeepEqual(tokens, tokensExpected) {
			t.Fatalf("unexpected tokens; got\n%q\nwant\n%q", tokens, tokensExpected)
		}
	}
	f("", nil)
	f("a", []string{"a"})
	f("*", []string{"*"})
	f("*.a", []string{"*.a"})
	f("[a-z]zx", []string{"[a-z]zx"})
	f("fo_o.$ba-r", []string{"fo_o.$ba-r"})
	f("{foo,bar}", []string{"{foo,bar}"})
	f("[a-z]", []string{"[a-z]"})
	f("fo*.bar10[s-z]as{aaa,bb}.ss{aa*ss,DS[c-D],ss}ss", []string{"fo*.bar10[s-z]as{aaa,bb}.ss{aa*ss,DS[c-D],ss}ss"})
	f("FOO.bar:avg", []string{"FOO.bar:avg"})
	f("FOO.bar\\:avg", []string{"FOO.bar\\:avg"})
	f(`foo.Bar|aaa(b,cd,e=aa)`, []string{"foo.Bar", "|", "aaa", "(", "b", ",", "cd", ",", "e", "=", "aa", ")"})
	f(`foo.Bar\|aaa\(b\,cd\,e\=aa\)`, []string{`foo.Bar\|aaa\(b\,cd\,e\=aa\)`})
	f(`123`, []string{`123`})
	f(`12.34`, []string{`12.34`})
	f(`12.34e4`, []string{`12.34e4`})
	f(`12.34e-4`, []string{`12.34e-4`})
	f(`12E+45`, []string{`12E+45`})
	f(`+12.34`, []string{`+`, `12.34`})
	f("0xABcd", []string{`0xABcd`})
	f("f(0o765,0b1101,0734,12.34)", []string{"f", "(", "0o765", ",", "0b1101", ",", "0734", ",", "12.34", ")"})
	f(`f ( foo, -.54e6,bar)`, []string{"f", "(", "foo", ",", "-", ".54e6", ",", "bar", ")"})
	f(`"foo(b'ar:baz)"`, []string{`"foo(b'ar:baz)"`})
	f(`'a"bc'`, []string{`'a"bc'`})
	f(`"f\"oo\\b"`, []string{`"f\"oo\\b"`})
	f(`a("b,c", 'de')`, []string{`a`, `(`, `"b,c"`, `,`, `'de'`, `)`})
}

func TestLexerError(t *testing.T) {
	f := func(s string) {
		t.Helper()
		var lex lexer
		lex.Init(s)
		for {
			if err := lex.Next(); err != nil {
				// Make sure lex.Next() consistently returns the error.
				if err1 := lex.Next(); err1 != err {
					t.Fatalf("unexpected error returned; got %v; want %v", err1, err)
				}
				return
			}
			if isEOF(lex.Token) {
				t.Fatalf("expecting non-nil error when parsing %q", s)
			}
		}
	}

	// Invalid identifier
	f("foo\\")
	f(`foo[bar`)
	f(`foo{bar`)
	f("~")
	f(",~")

	// Invalid string
	f(`"`)
	f(`"foo`)
	f(`'aa`)

	// Invalid number
	f(`0x`)
	f(`0o`)
	f(`-0b`)
	f(`13.`)
	f(`1e`)
	f(`1E+`)
	f(`1.3e`)
}
407
app/vmselect/graphiteql/parser.go
Normal file
@ -0,0 +1,407 @@
package graphiteql

import (
	"fmt"
	"strconv"
	"strings"
)

type parser struct {
	lex lexer
}

// Expr is Graphite expression for render API.
type Expr interface {
	// AppendString appends Expr contents to dst and returns the result.
	AppendString(dst []byte) []byte
}

// Parse parses Graphite render API target expression.
//
// See https://graphite.readthedocs.io/en/stable/render_api.html
func Parse(s string) (Expr, error) {
	var p parser
	p.lex.Init(s)
	if err := p.lex.Next(); err != nil {
		return nil, fmt.Errorf("cannot parse target expression: %w; context: %q", err, p.lex.Context())
	}
	expr, err := p.parseExpr()
	if err != nil {
		return nil, fmt.Errorf("cannot parse target expression: %w; context: %q", err, p.lex.Context())
	}
	if !isEOF(p.lex.Token) {
		return nil, fmt.Errorf("unexpected tail left after parsing %q; context: %q", expr.AppendString(nil), p.lex.Context())
	}
	return expr, nil
}

func (p *parser) parseExpr() (Expr, error) {
	var expr Expr
	var err error
	token := p.lex.Token
	switch {
	case isPositiveNumberPrefix(token) || token == "+" || token == "-":
		expr, err = p.parseNumber()
		if err != nil {
			return nil, err
		}
	case isStringPrefix(token):
		expr, err = p.parseString()
		if err != nil {
			return nil, err
		}
	case isIdentPrefix(token):
		expr, err = p.parseMetricExprOrFuncCall()
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("unexpected token when parsing expression: %q", token)
	}

	for {
		switch p.lex.Token {
		case "|":
			// Chained function call. For example, `metric|func`
			firstArg := &ArgExpr{
				Expr: expr,
			}
			expr, err = p.parseChainedFunc(firstArg)
			if err != nil {
				return nil, err
			}
			continue
		default:
			return expr, nil
		}
	}
}

func (p *parser) parseNumber() (*NumberExpr, error) {
	token := p.lex.Token
	isMinus := false
	if token == "-" || token == "+" {
		if err := p.lex.Next(); err != nil {
			return nil, fmt.Errorf("cannot find number after %q token: %w", token, err)
		}
		isMinus = token == "-"
		token = p.lex.Token
	}
	var n float64
	if isSpecialIntegerPrefix(token) {
		d, err := strconv.ParseInt(token, 0, 64)
		if err != nil {
			return nil, fmt.Errorf("cannot parse integer %q: %w", token, err)
		}
		n = float64(d)
	} else {
		f, err := strconv.ParseFloat(token, 64)
		if err != nil {
			return nil, fmt.Errorf("cannot parse floating-point number %q: %w", token, err)
		}
		n = f
	}
	if isMinus {
		n = -n
	}
	if err := p.lex.Next(); err != nil {
		return nil, fmt.Errorf("cannot find next token after %q: %w", token, err)
	}
	ne := &NumberExpr{
		N: n,
	}
	return ne, nil
}

// NoneExpr contains None value
type NoneExpr struct{}

// AppendString appends string representation of nne to dst and returns the result.
func (nne *NoneExpr) AppendString(dst []byte) []byte {
	return append(dst, "None"...)
}

// BoolExpr contains bool value (True or False).
type BoolExpr struct {
	// B is bool value
	B bool
}

// AppendString appends string representation of be to dst and returns the result.
func (be *BoolExpr) AppendString(dst []byte) []byte {
	if be.B {
		return append(dst, "True"...)
	}
	return append(dst, "False"...)
}

// NumberExpr contains float64 constant.
type NumberExpr struct {
	// N is float64 constant
	N float64
}

// AppendString appends string representation of ne to dst and returns the result.
func (ne *NumberExpr) AppendString(dst []byte) []byte {
	return strconv.AppendFloat(dst, ne.N, 'g', -1, 64)
}

func (p *parser) parseString() (*StringExpr, error) {
	token := p.lex.Token
	if len(token) < 2 || token[0] != token[len(token)-1] {
		return nil, fmt.Errorf(`string literal contains unexpected trailing char; got %q`, token)
	}
	quote := string(append([]byte{}, token[0]))
	s := token[1 : len(token)-1]
	s = strings.ReplaceAll(s, `\`+quote, quote)
	s = strings.ReplaceAll(s, `\\`, `\`)
	if err := p.lex.Next(); err != nil {
		return nil, fmt.Errorf("cannot find next token after %s: %w", token, err)
	}
	se := &StringExpr{
		S: s,
	}
	return se, nil
}

// StringExpr represents string constant.
type StringExpr struct {
	// S contains unquoted string contents.
	S string
}

// AppendString appends se to dst and returns the result.
func (se *StringExpr) AppendString(dst []byte) []byte {
	dst = append(dst, '\'')
	s := strings.ReplaceAll(se.S, `\`, `\\`)
	s = strings.ReplaceAll(s, `'`, `\'`)
	dst = append(dst, s...)
	dst = append(dst, '\'')
	return dst
}

// QuoteString quotes s, so it could be used in Graphite queries.
func QuoteString(s string) string {
	se := &StringExpr{
		S: s,
	}
	return string(se.AppendString(nil))
}

func (p *parser) parseMetricExprOrFuncCall() (Expr, error) {
	token := p.lex.Token
	ident := unescapeIdent(token)
	if err := p.lex.Next(); err != nil {
		return nil, fmt.Errorf("cannot find next token after %q: %w", token, err)
	}
	token = p.lex.Token
	switch token {
	case "(":
		// Function call. For example, `func(foo,bar)`
		funcName := ident
		args, err := p.parseArgs()
		if err != nil {
			return nil, fmt.Errorf("cannot parse args for function %q: %w", funcName, err)
		}
		fe := &FuncExpr{
			FuncName:   funcName,
			Args:       args,
			printState: printStateNormal,
		}
		return fe, nil
	default:
		// Metric expression or bool expression or None.
		if isBool(ident) {
			be := &BoolExpr{
				B: strings.ToLower(ident) == "true",
			}
			return be, nil
		}
		if strings.ToLower(ident) == "none" {
			nne := &NoneExpr{}
			return nne, nil
		}
		me := &MetricExpr{
			Query: ident,
		}
		return me, nil
	}
}

func (p *parser) parseChainedFunc(firstArg *ArgExpr) (*FuncExpr, error) {
	for {
		if err := p.lex.Next(); err != nil {
			return nil, fmt.Errorf("cannot find function name after %q|: %w", firstArg.AppendString(nil), err)
		}
		if !isIdentPrefix(p.lex.Token) {
			return nil, fmt.Errorf("expecting function name after %q|, got %q", firstArg.AppendString(nil), p.lex.Token)
		}
		funcName := unescapeIdent(p.lex.Token)
		if err := p.lex.Next(); err != nil {
			return nil, fmt.Errorf("cannot find next token after %q|%q: %w", firstArg.AppendString(nil), funcName, err)
		}
		fe := &FuncExpr{
			FuncName:   funcName,
			printState: printStateChained,
		}
		if p.lex.Token != "(" {
			fe.Args = []*ArgExpr{firstArg}
		} else {
			args, err := p.parseArgs()
			if err != nil {
				return nil, fmt.Errorf("cannot parse args for %q|%q: %w", firstArg.AppendString(nil), funcName, err)
			}
			fe.Args = append([]*ArgExpr{firstArg}, args...)
		}
		if p.lex.Token != "|" {
			return fe, nil
		}
		firstArg = &ArgExpr{
			Expr: fe,
		}
	}
}

func (p *parser) parseArgs() ([]*ArgExpr, error) {
	var args []*ArgExpr
	for {
		if err := p.lex.Next(); err != nil {
			return nil, fmt.Errorf("cannot find arg #%d: %w", len(args), err)
		}
		if p.lex.Token == ")" {
			if err := p.lex.Next(); err != nil {
				return nil, fmt.Errorf("cannot find next token after function args: %w", err)
			}
			return args, nil
		}
		expr, err := p.parseExpr()
		if err != nil {
			return nil, fmt.Errorf("cannot parse arg #%d: %w", len(args), err)
		}
		if p.lex.Token == "=" {
			// Named expression
			me, ok := expr.(*MetricExpr)
			if !ok {
				return nil, fmt.Errorf("expecting a name for named expression; got %q", expr.AppendString(nil))
			}
			argName := me.Query
			if err := p.lex.Next(); err != nil {
				return nil, fmt.Errorf("cannot find named value for %q: %w", argName, err)
			}
			argValue, err := p.parseExpr()
			if err != nil {
				return nil, fmt.Errorf("cannot parse named value for %q: %w", argName, err)
			}
			args = append(args, &ArgExpr{
				Name: argName,
				Expr: argValue,
			})
		} else {
			args = append(args, &ArgExpr{
				Expr: expr,
			})
		}
		switch p.lex.Token {
		case ",":
			// Continue parsing args
		case ")":
			// End of args
			if err := p.lex.Next(); err != nil {
				return nil, fmt.Errorf("cannot find next token after func args: %w", err)
			}
			return args, nil
		default:
			return nil, fmt.Errorf("unexpected token detected in func args: %q", p.lex.Token)
		}
	}
}

// ArgExpr represents function arg (which may be named).
type ArgExpr struct {
	// Name is named arg name. It is empty for positional arg.
	Name string

	// Expr is arg expression.
	Expr Expr
}

// AppendString appends string representation of ae to dst and returns the result.
func (ae *ArgExpr) AppendString(dst []byte) []byte {
	if ae.Name != "" {
		dst = appendEscapedIdent(dst, ae.Name)
		dst = append(dst, '=')
	}
	dst = ae.Expr.AppendString(dst)
	return dst
}

// FuncExpr represents function call.
type FuncExpr struct {
	// FuncName is the function name
	FuncName string

	// Args is function args.
	Args []*ArgExpr

	printState funcPrintState
}

type funcPrintState int

const (
	// Normal func call: `func(arg1, ..., argN)`
	printStateNormal = funcPrintState(0)

	// Chained func call: `arg1|func(arg2, ..., argN)`
	printStateChained = funcPrintState(1)
)

// AppendString appends string representation of fe to dst and returns the result.
func (fe *FuncExpr) AppendString(dst []byte) []byte {
	switch fe.printState {
	case printStateNormal:
		dst = appendEscapedIdent(dst, fe.FuncName)
		dst = appendArgsString(dst, fe.Args)
	case printStateChained:
		if len(fe.Args) == 0 {
			panic("BUG: chained func call must have at least a single arg")
		}
		firstArg := fe.Args[0]
		tailArgs := fe.Args[1:]
		if firstArg.Name != "" {
			panic("BUG: the first chained arg must have no name")
		}
		dst = firstArg.AppendString(dst)
		dst = append(dst, '|')
		dst = appendEscapedIdent(dst, fe.FuncName)
		if len(tailArgs) > 0 {
			dst = appendArgsString(dst, tailArgs)
		}
	default:
		panic(fmt.Sprintf("BUG: unexpected printState=%d", fe.printState))
	}
	return dst
}

// MetricExpr represents metric expression.
type MetricExpr struct {
	// Query is the query for fetching metrics.
	Query string
}

// AppendString appends string representation of me to dst and returns the result.
func (me *MetricExpr) AppendString(dst []byte) []byte {
	return appendEscapedIdent(dst, me.Query)
}

func appendArgsString(dst []byte, args []*ArgExpr) []byte {
	dst = append(dst, '(')
	for i, arg := range args {
		dst = arg.AppendString(dst)
		if i+1 < len(args) {
			dst = append(dst, ',')
		}
	}
	dst = append(dst, ')')
	return dst
}
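A hedged usage sketch for the exported API above (the expected output comes from TestParseSuccess below):

	package main

	import (
		"fmt"
		"log"

		"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/graphiteql"
	)

	func main() {
		// Parse a chained Graphite target and print its canonical form.
		expr, err := graphiteql.Parse(`foo | bar`)
		if err != nil {
			log.Fatalf("cannot parse target: %s", err)
		}
		fmt.Println(string(expr.AppendString(nil))) // foo|bar
	}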
121
app/vmselect/graphiteql/parser_test.go
Normal file
@ -0,0 +1,121 @@
package graphiteql

import (
	"testing"
)

func TestQuoteString(t *testing.T) {
	f := func(s, qExpected string) {
		t.Helper()
		q := QuoteString(s)
		if q != qExpected {
			t.Fatalf("unexpected result from QuoteString(%q); got %s; want %s", s, q, qExpected)
		}
	}
	f(``, `''`)
	f(`foo`, `'foo'`)
	f(`f'o\ba"r`, `'f\'o\\ba"r'`)
}

func TestParseSuccess(t *testing.T) {
	another := func(s, resultExpected string) {
		t.Helper()
		expr, err := Parse(s)
		if err != nil {
			t.Fatalf("unexpected error when parsing %s: %s", s, err)
		}
		result := expr.AppendString(nil)
		if string(result) != resultExpected {
			t.Fatalf("unexpected result when marshaling %s;\ngot\n%s\nwant\n%s", s, result, resultExpected)
		}
	}
	same := func(s string) {
		t.Helper()
		another(s, s)
	}
	// Metric expressions
	same("a")
	same("foo.bar.baz")
	same("foo.bar.baz:aa:bb")
	same("fOO.*.b[a-z]R{aa*,bb}s_s.$aaa")
	same("*")
	same("*.foo")
	same("{x,y}.z")
	same("[x-zaBc]DeF")
	another(`\f\ oo`, `f\ oo`)
	another(`f\x1B\x3a`, `f\x1b:`)

	// booleans
	same("True")
	same("False")
	another("true", "True")
	another("faLSe", "False")

	// Numbers
	same("123")
	same("-123")
	another("+123", "123")
	same("12.3")
	same("-1.23")
	another("+1.23", "1.23")
	another("123e5", "1.23e+07")
	another("-123e5", "-1.23e+07")
	another("+123e5", "1.23e+07")
	another("1.23E5", "123000")
	another("-1.23e5", "-123000")
	another("+1.23e5", "123000")
	another("0xab", "171")
	another("0b1011101", "93")
	another("0O12345", "5349")

	// strings
	another(`"foo'"`, `'foo\''`)
	same(`'fo\'o"b\\ar'`)
	another(`"f\\oo\.bar\1"`, `'f\\oo\\.bar\\1'`)

	// function calls
	same("foo()")
	another("foo(bar,)", "foo(bar)")
	same("foo(bar,123,'baz')")
	another("foo(foo(bar), BAZ = xx ( 123, x))", `foo(foo(bar),BAZ=xx(123,x))`)

	// chained functions
	another("foo | bar", "foo|bar")
	same("foo|bar|baz")
	same("foo(sss)|bar(aa)|xxx.ss")
	another(`foo|bar(1,"sdf")`, `foo|bar(1,'sdf')`)

	// mix
	same(`f(a,xx=b|c|aa(124,'456'),aa=bb)`)
}

func TestParseFailure(t *testing.T) {
	f := func(s string) {
		t.Helper()
		expr, err := Parse(s)
		if err == nil {
			t.Fatalf("expecting error when parsing %s", s)
		}
		if expr != nil {
			t.Fatalf("expecting nil expr when parsing %s; got %s", s, expr.AppendString(nil))
		}
	}
	f("")
	f("'asdf")
	f("foo bar")
	f("f(a")
	f("f(1.2.3")
	f("foo|bar(")
	f("+foo")
	f("-bar")
	f("123 '")
	f("f '")
	f("f|")
	f("f|'")
	f("f|123")
	f("f('")
	f("f(f()=123)")
	f("f(a=')")
	f("f(a=foo(")
	f("f()'")
}
app/vmselect/prometheus/prometheus.go
@ -54,10 +54,6 @@ func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request
	if err := r.ParseForm(); err != nil {
		return fmt.Errorf("cannot parse request form values: %w", err)
	}
	matches := r.Form["match[]"]
	if len(matches) == 0 {
		return fmt.Errorf("missing `match[]` arg")
	}
	lookbackDelta, err := getMaxLookback(r)
	if err != nil {
		return err

@ -77,7 +73,7 @@ func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request
	if start >= end {
		start = end - defaultStep
	}
	tagFilterss, err := getTagFilterssFromMatches(matches)
	tagFilterss, err := getTagFilterssFromRequest(r)
	if err != nil {
		return err
	}

@ -123,15 +119,6 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques
		return fmt.Errorf("missing `format` arg; see https://victoriametrics.github.io/#how-to-export-csv-data")
	}
	fieldNames := strings.Split(format, ",")
	matches := r.Form["match[]"]
	if len(matches) == 0 {
		// Maintain backwards compatibility
		match := r.FormValue("match")
		if len(match) == 0 {
			return fmt.Errorf("missing `match[]` arg")
		}
		matches = []string{match}
	}
	start, err := searchutils.GetTime(r, "start", 0)
	if err != nil {
		return err

@ -141,7 +128,7 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques
		return err
	}
	deadline := searchutils.GetDeadlineForExport(r, startTime)
	tagFilterss, err := getTagFilterssFromMatches(matches)
	tagFilterss, err := getTagFilterssFromRequest(r)
	if err != nil {
		return err
	}

@ -200,15 +187,6 @@ func ExportNativeHandler(startTime time.Time, w http.ResponseWriter, r *http.Req
	if err := r.ParseForm(); err != nil {
		return fmt.Errorf("cannot parse request form values: %w", err)
	}
	matches := r.Form["match[]"]
	if len(matches) == 0 {
		// Maintain backwards compatibility
		match := r.FormValue("match")
		if len(match) == 0 {
			return fmt.Errorf("missing `match[]` arg")
		}
		matches = []string{match}
	}
	start, err := searchutils.GetTime(r, "start", 0)
	if err != nil {
		return err

@ -218,7 +196,7 @@ func ExportNativeHandler(startTime time.Time, w http.ResponseWriter, r *http.Req
		return err
	}
	deadline := searchutils.GetDeadlineForExport(r, startTime)
	tagFilterss, err := getTagFilterssFromMatches(matches)
	tagFilterss, err := getTagFilterssFromRequest(r)
	if err != nil {
		return err
	}

@ -282,14 +260,9 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
	if err := r.ParseForm(); err != nil {
		return fmt.Errorf("cannot parse request form values: %w", err)
	}
	matches := r.Form["match[]"]
	if len(matches) == 0 {
		// Maintain backwards compatibility
		match := r.FormValue("match")
		if len(match) == 0 {
			return fmt.Errorf("missing `match[]` arg")
		}
		matches = []string{match}
	matches, err := getMatchesFromRequest(r)
	if err != nil {
		return err
	}
	start, err := searchutils.GetTime(r, "start", 0)
	if err != nil {

@ -306,7 +279,11 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
	if start >= end {
		end = start + defaultStep
	}
	if err := exportHandler(w, matches, start, end, format, maxRowsPerLine, reduceMemUsage, deadline); err != nil {
	etf, err := getEnforcedTagFiltersFromRequest(r)
	if err != nil {
		return err
	}
	if err := exportHandler(w, matches, etf, start, end, format, maxRowsPerLine, reduceMemUsage, deadline); err != nil {
		return fmt.Errorf("error when exporting data for queries=%q on the time range (start=%d, end=%d): %w", matches, start, end, err)
	}
	exportDuration.UpdateDuration(startTime)

@ -315,7 +292,7 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)

var exportDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/export"}`)

func exportHandler(w http.ResponseWriter, matches []string, start, end int64, format string, maxRowsPerLine int, reduceMemUsage bool, deadline searchutils.Deadline) error {
func exportHandler(w http.ResponseWriter, matches []string, etf []storage.TagFilter, start, end int64, format string, maxRowsPerLine int, reduceMemUsage bool, deadline searchutils.Deadline) error {
	writeResponseFunc := WriteExportStdResponse
	writeLineFunc := func(xb *exportBlock, resultsCh chan<- *quicktemplate.ByteBuffer) {
		bb := quicktemplate.AcquireByteBuffer()

@ -372,6 +349,8 @@ func exportHandler(w http.ResponseWriter, matches []string, start, end int64, fo
	if err != nil {
		return err
	}
	tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)

	sq := storage.NewSearchQuery(start, end, tagFilterss)
	w.Header().Set("Content-Type", contentType)
	bw := bufferedwriter.Get(w)

@ -465,18 +444,14 @@ func DeleteHandler(startTime time.Time, r *http.Request) error {
	if r.FormValue("start") != "" || r.FormValue("end") != "" {
		return fmt.Errorf("start and end aren't supported. Remove these args from the query in order to delete all the matching metrics")
	}
	matches := r.Form["match[]"]
	if len(matches) == 0 {
		return fmt.Errorf("missing `match[]` arg")
	}
	tagFilterss, err := getTagFilterssFromMatches(matches)
	tagFilterss, err := getTagFilterssFromRequest(r)
	if err != nil {
		return err
	}
	sq := storage.NewSearchQuery(0, 0, tagFilterss)
	deletedCount, err := netstorage.DeleteSeries(sq)
	if err != nil {
		return fmt.Errorf("cannot delete time series matching %q: %w", matches, err)
		return fmt.Errorf("cannot delete time series: %w", err)
	}
	if deletedCount > 0 {
		promql.ResetRollupResultCache()

@ -495,8 +470,12 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
	if err := r.ParseForm(); err != nil {
		return fmt.Errorf("cannot parse form values: %w", err)
	}
	etf, err := getEnforcedTagFiltersFromRequest(r)
	if err != nil {
		return err
	}
	var labelValues []string
	if len(r.Form["match[]"]) == 0 {
	if len(r.Form["match[]"]) == 0 && len(etf) == 0 {
		if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
			var err error
			labelValues, err = netstorage.GetLabelValues(labelName, deadline)

@ -540,7 +519,7 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
		if err != nil {
			return err
		}
		labelValues, err = labelValuesWithMatches(labelName, matches, start, end, deadline)
		labelValues, err = labelValuesWithMatches(labelName, matches, etf, start, end, deadline)
		if err != nil {
			return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %w", labelName, matches, start, end, err)
		}

@ -557,10 +536,7 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
	return nil
}

func labelValuesWithMatches(labelName string, matches []string, start, end int64, deadline searchutils.Deadline) ([]string, error) {
	if len(matches) == 0 {
		logger.Panicf("BUG: matches must be non-empty")
	}
func labelValuesWithMatches(labelName string, matches []string, etf []storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) {
	tagFilterss, err := getTagFilterssFromMatches(matches)
	if err != nil {
		return nil, err

@ -581,6 +557,10 @@ func labelValuesWithMatches(labelName string, matches []string, start, end int64
	if start >= end {
		end = start + defaultStep
	}
	tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
	if len(tagFilterss) == 0 {
		logger.Panicf("BUG: tagFilterss must be non-empty")
	}
	sq := storage.NewSearchQuery(start, end, tagFilterss)
	m := make(map[string]struct{})
	if end-start > 24*3600*1000 {

@ -705,8 +685,12 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
	if err := r.ParseForm(); err != nil {
		return fmt.Errorf("cannot parse form values: %w", err)
	}
	etf, err := getEnforcedTagFiltersFromRequest(r)
	if err != nil {
		return err
	}
	var labels []string
	if len(r.Form["match[]"]) == 0 {
	if len(r.Form["match[]"]) == 0 && len(etf) == 0 {
		if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
			var err error
			labels, err = netstorage.GetLabels(deadline)

@ -748,7 +732,7 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
		if err != nil {
			return err
		}
		labels, err = labelsWithMatches(matches, start, end, deadline)
		labels, err = labelsWithMatches(matches, etf, start, end, deadline)
		if err != nil {
			return fmt.Errorf("cannot obtain labels for match[]=%q, start=%d, end=%d: %w", matches, start, end, err)
		}

@ -765,10 +749,7 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
	return nil
}

func labelsWithMatches(matches []string, start, end int64, deadline searchutils.Deadline) ([]string, error) {
	if len(matches) == 0 {
		logger.Panicf("BUG: matches must be non-empty")
	}
func labelsWithMatches(matches []string, etf []storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) {
	tagFilterss, err := getTagFilterssFromMatches(matches)
	if err != nil {
		return nil, err

@ -776,6 +757,10 @@ func labelsWithMatches(matches []string, start, end int64, deadline searchutils.
	if start >= end {
		end = start + defaultStep
	}
	tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
	if len(tagFilterss) == 0 {
		logger.Panicf("BUG: tagFilterss must be non-empty")
	}
	sq := storage.NewSearchQuery(start, end, tagFilterss)
	m := make(map[string]struct{})
	if end-start > 24*3600*1000 {

@ -849,10 +834,6 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
	if err := r.ParseForm(); err != nil {
		return fmt.Errorf("cannot parse form values: %w", err)
	}
	matches := r.Form["match[]"]
	if len(matches) == 0 {
		return fmt.Errorf("missing `match[]` arg")
	}
	end, err := searchutils.GetTime(r, "end", ct)
	if err != nil {
		return err

@ -868,7 +849,7 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
	}
	deadline := searchutils.GetDeadlineForQuery(r, startTime)

	tagFilterss, err := getTagFilterssFromMatches(matches)
	tagFilterss, err := getTagFilterssFromRequest(r)
	if err != nil {
		return err
	}

@ -969,6 +950,10 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
	if len(query) > maxQueryLen.N {
		return fmt.Errorf("too long query; got %d bytes; mustn't exceed `-search.maxQueryLen=%d` bytes", len(query), maxQueryLen.N)
	}
	etf, err := getEnforcedTagFiltersFromRequest(r)
	if err != nil {
		return err
	}
	if childQuery, windowStr, offsetStr := promql.IsMetricSelectorWithRollup(query); childQuery != "" {
		window, err := parsePositiveDuration(windowStr, step)
		if err != nil {

@ -981,7 +966,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
		start -= offset
		end := start
		start = end - window
		if err := exportHandler(w, []string{childQuery}, start, end, "promapi", 0, false, deadline); err != nil {
		if err := exportHandler(w, []string{childQuery}, etf, start, end, "promapi", 0, false, deadline); err != nil {
			return fmt.Errorf("error when exporting data for query=%q on the time range (start=%d, end=%d): %w", childQuery, start, end, err)
		}
		queryDuration.UpdateDuration(startTime)

@ -1006,7 +991,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
		start -= offset
		end := start
		start = end - window
		if err := queryRangeHandler(startTime, w, childQuery, start, end, step, r, ct); err != nil {
		if err := queryRangeHandler(startTime, w, childQuery, start, end, step, r, ct, etf); err != nil {
			return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", childQuery, start, end, step, err)
		}
		queryDuration.UpdateDuration(startTime)

@ -1030,6 +1015,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
		QuotedRemoteAddr: httpserver.GetQuotedRemoteAddr(r),
		Deadline:         deadline,
		LookbackDelta:    lookbackDelta,
		EnforcedTagFilters: etf,
	}
	result, err := promql.Exec(&ec, query, true)
	if err != nil {

@ -1092,14 +1078,18 @@ func QueryRangeHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
	if err != nil {
		return err
	}
	if err := queryRangeHandler(startTime, w, query, start, end, step, r, ct); err != nil {
	etf, err := getEnforcedTagFiltersFromRequest(r)
	if err != nil {
		return err
	}
	if err := queryRangeHandler(startTime, w, query, start, end, step, r, ct, etf); err != nil {
		return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", query, start, end, step, err)
	}
	queryRangeDuration.UpdateDuration(startTime)
	return nil
}

func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string, start, end, step int64, r *http.Request, ct int64) error {
func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string, start, end, step int64, r *http.Request, ct int64, etf []storage.TagFilter) error {
	deadline := searchutils.GetDeadlineForQuery(r, startTime)
	mayCache := !searchutils.GetBool(r, "nocache")
	lookbackDelta, err := getMaxLookback(r)

@ -1129,6 +1119,7 @@ func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string,
		Deadline:      deadline,
		MayCache:      mayCache,
		LookbackDelta: lookbackDelta,
		EnforcedTagFilters: etf,
	}
	result, err := promql.Exec(&ec, query, false)
	if err != nil {

@ -1236,6 +1227,38 @@ func getMaxLookback(r *http.Request) (int64, error) {
	return searchutils.GetDuration(r, "max_lookback", d)
}

func getEnforcedTagFiltersFromRequest(r *http.Request) ([]storage.TagFilter, error) {
	// fast path.
	extraLabels := r.Form["extra_label"]
	if len(extraLabels) == 0 {
		return nil, nil
	}
	tagFilters := make([]storage.TagFilter, 0, len(extraLabels))
	for _, match := range extraLabels {
		tmp := strings.SplitN(match, "=", 2)
		if len(tmp) != 2 {
			return nil, fmt.Errorf("`extra_label` query arg must have the format `name=value`; got %q", match)
		}
		tagFilters = append(tagFilters, storage.TagFilter{
			Key:   []byte(tmp[0]),
			Value: []byte(tmp[1]),
		})
	}
	return tagFilters, nil
}

func addEnforcedFiltersToTagFilterss(dstTfss [][]storage.TagFilter, enforcedFilters []storage.TagFilter) [][]storage.TagFilter {
	if len(dstTfss) == 0 {
		return [][]storage.TagFilter{
			enforcedFilters,
		}
	}
	for i := range dstTfss {
		dstTfss[i] = append(dstTfss[i], enforcedFilters...)
	}
	return dstTfss
}

func getTagFilterssFromMatches(matches []string) ([][]storage.TagFilter, error) {
	tagFilterss := make([][]storage.TagFilter, 0, len(matches))
	for _, match := range matches {
|
|||
return tagFilterss, nil
|
||||
}
|
||||
|
||||
func getTagFilterssFromRequest(r *http.Request) ([][]storage.TagFilter, error) {
|
||||
matches, err := getMatchesFromRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tagFilterss, err := getTagFilterssFromMatches(matches)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
etf, err := getEnforcedTagFiltersFromRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
|
||||
return tagFilterss, nil
|
||||
}
|
||||
|
||||
func getMatchesFromRequest(r *http.Request) ([]string, error) {
|
||||
matches := r.Form["match[]"]
|
||||
if len(matches) > 0 {
|
||||
return matches, nil
|
||||
}
|
||||
match := r.Form.Get("match")
|
||||
if len(match) == 0 {
|
||||
return nil, fmt.Errorf("missing `match[]` query arg")
|
||||
}
|
||||
return []string{match}, nil
|
||||
}
|
||||
|
||||
func getLatencyOffsetMilliseconds() int64 {
|
||||
d := latencyOffset.Milliseconds()
|
||||
if d <= 1000 {
|
||||
|
|
|
app/vmselect/prometheus/prometheus_test.go
@ -2,10 +2,12 @@ package prometheus

import (
	"math"
	"net/http"
	"reflect"
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)

func TestRemoveEmptyValuesAndTimeseries(t *testing.T) {

@ -195,3 +197,75 @@ func TestAdjustLastPoints(t *testing.T) {
		},
	})
}

// helper for tests
func tfFromKV(k, v string) storage.TagFilter {
	return storage.TagFilter{
		Key:   []byte(k),
		Value: []byte(v),
	}
}

func Test_addEnforcedFiltersToTagFilterss(t *testing.T) {
	f := func(t *testing.T, dstTfss [][]storage.TagFilter, enforcedFilters []storage.TagFilter, want [][]storage.TagFilter) {
		t.Helper()
		got := addEnforcedFiltersToTagFilterss(dstTfss, enforcedFilters)
		if !reflect.DeepEqual(got, want) {
			t.Fatalf("unexpected result for addEnforcedFiltersToTagFilterss, \ngot: %v,\n want: %v", got, want)
		}
	}
	f(t, [][]storage.TagFilter{{tfFromKV("label", "value")}},
		nil,
		[][]storage.TagFilter{{tfFromKV("label", "value")}})

	f(t, nil,
		[]storage.TagFilter{tfFromKV("ext-label", "ext-value")},
		[][]storage.TagFilter{{tfFromKV("ext-label", "ext-value")}})

	f(t, [][]storage.TagFilter{
		{tfFromKV("l1", "v1")},
		{tfFromKV("l2", "v2")},
	},
		[]storage.TagFilter{tfFromKV("ext-l1", "v2")},
		[][]storage.TagFilter{
			{tfFromKV("l1", "v1"), tfFromKV("ext-l1", "v2")},
			{tfFromKV("l2", "v2"), tfFromKV("ext-l1", "v2")},
		})
}

func Test_getEnforcedTagFiltersFromRequest(t *testing.T) {
	httpReqWithForm := func(tfs []string) *http.Request {
		return &http.Request{
			Form: map[string][]string{
				"extra_label": tfs,
			},
		}
	}
	f := func(t *testing.T, r *http.Request, want []storage.TagFilter, wantErr bool) {
		t.Helper()
		got, err := getEnforcedTagFiltersFromRequest(r)
		if (err != nil) != wantErr {
			t.Fatalf("unexpected error: %v", err)
		}
		if !reflect.DeepEqual(got, want) {
			t.Fatalf("unexpected result for getEnforcedTagFiltersFromRequest, \ngot: %v,\n want: %v", got, want)
		}
	}

	f(t, httpReqWithForm([]string{"label=value"}),
		[]storage.TagFilter{
			tfFromKV("label", "value"),
		},
		false)

	f(t, httpReqWithForm([]string{"job=vmagent", "dc=gce"}),
		[]storage.TagFilter{tfFromKV("job", "vmagent"), tfFromKV("dc", "gce")},
		false,
	)
	f(t, httpReqWithForm([]string{"bad_filter"}),
		nil,
		true,
	)
	f(t, &http.Request{},
		nil, false)
}
@ -100,6 +100,9 @@ type EvalConfig struct {

	timestamps     []int64
	timestampsOnce sync.Once

	// EnforcedTagFilters is used for applying additional label filters to the query.
	EnforcedTagFilters []storage.TagFilter
}

// newEvalConfig returns new EvalConfig copy from src.

@ -111,6 +114,7 @@ func newEvalConfig(src *EvalConfig) *EvalConfig {
	ec.Deadline = src.Deadline
	ec.MayCache = src.MayCache
	ec.LookbackDelta = src.LookbackDelta
	ec.EnforcedTagFilters = src.EnforcedTagFilters

	// do not copy src.timestamps - they must be generated again.
	return &ec

@ -647,6 +651,8 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, name string, rf rollupFunc,

	// Fetch the remaining part of the result.
	tfs := toTagFilters(me.LabelFilters)
	// Append the enforced (external) label filters.
	tfs = append(tfs, ec.EnforcedTagFilters...)
	minTimestamp := start - maxSilenceInterval
	if window > ec.Step {
		minTimestamp -= window
@ -2,13 +2,22 @@

# tip

* FEATURE: added [vmctl tool](https://victoriametrics.github.io/vmctl.html) to VictoriaMetrics release process. Now it is packaged in `vmutils-*.tar.gz` archive on [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases). Source code for `vmctl` tool has been moved from [github.com/VictoriaMetrics/vmctl](https://github.com/VictoriaMetrics/vmctl) to [github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmctl).
* FEATURE: added `-loggerTimezone` command-line flag for adjusting time zone for timestamps in log messages. By default UTC is used.
* FEATURE: added `-search.maxStepForPointsAdjustment` command-line flag, which can be used for disabling adjustment for points returned by `/api/v1/query_range` handler if such points have timestamps closer than `-search.latencyOffset` to the current time. Such points may contain incomplete data, so they are substituted by the previous values for `step` query args smaller than one minute by default.
* FEATURE: vmselect: added ability to set additional label filters, which must be applied during queries. Such label filters can be set via optional `extra_label` query arg, which is accepted by [querying API](https://victoriametrics.github.io/#prometheus-querying-api-usage) handlers. For example, the request to `/api/v1/query_range?extra_label=tenant_id=123&query=<query>` adds `{tenant_id="123"}` label filter to the given `<query>`. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1021 .
* FEATURE: vmalert: added `-datasource.queryStep` command-line flag for passing optional `step` query arg to `/api/v1/query` endpoint. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1025
* FEATURE: vmalert: added ability to query Graphite datasource when evaluating alerting and recording rules. See [these docs](https://victoriametrics.github.io/vmalert.html#graphite) for details.
* FEATURE: vmagent: added `-remoteWrite.roundDigits` command-line option for rounding metric values to the given number of decimal digits after the point before sending the metric to the corresponding `-remoteWrite.url`. This option can be used for improving data compression on the remote storage, because values with fewer decimal digits can be compressed better than values with more decimal digits.
* FEATURE: vmagent: added `-remoteWrite.rateLimit` command-line flag for limiting data transfer rate to `-remoteWrite.url`. This may be useful when big amounts of buffered data are sent after temporary unavailability of the remote storage. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1035
* FEATURE: vmagent: export the following additional metrics, which may be useful during troubleshooting:
  - `vm_promscrape_scrapes_failed_per_url_total`
  - `vm_promscrape_scrapes_skipped_by_sample_limit_per_url_total`
  - `vm_promscrape_discovery_requests_total`
  - `vm_promscrape_discovery_retries_total`
  - `vm_promscrape_scrape_retries_total`

* BUGFIX: vmagent: reduce HTTP reconnection rate for scrape targets. Previously vmagent could erroneously close HTTP keep-alive connections more frequently than needed.
* BUGFIX: vmagent: retry scrape and service discovery requests when the remote server closes HTTP keep-alive connection. Previously `disable_keepalive: true` option could be used under `scrape_configs` section when working with such servers.
@ -269,7 +269,7 @@ the update process. See [cluster availability](#cluster-availability) section fo

* The cluster remains available if at least a single `vmstorage` node exists:

- `vminsert` re-routes incoming data from unavailable `vmstorage` nodes to healthy `vmstorage` nodes
- `vmselect` continues serving partial responses if at least a single `vmstorage` node is available.
- `vmselect` continues serving partial responses if at least a single `vmstorage` node is available. If consistency over availability is preferred, then either pass `-search.denyPartialResponse` command-line flag to `vmselect` or pass `deny_partial_response=1` query arg in requests to `vmselect`.
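For instance, here is a hedged example of opting into consistency on a per-request basis (it assumes a cluster `vmselect` on its default port `8481` and accountID `0`; adjust both for your setup):

```bash
# Fail the query instead of silently returning partial data
# when some vmstorage nodes are unavailable.
curl -G 'http://vmselect:8481/select/0/prometheus/api/v1/query' \
  --data-urlencode 'query=up' \
  --data-urlencode 'deny_partial_response=1'
```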

Data replication can be used for increasing storage durability. See [these docs](#replication-and-data-safety) for details.

@ -154,6 +154,7 @@ Alphabetically sorted links to case studies:

* [Tuning](#tuning)
* [Monitoring](#monitoring)
* [Troubleshooting](#troubleshooting)
* [Data migration](#data-migration)
* [Backfilling](#backfilling)
* [Data updates](#data-updates)
* [Replication](#replication)
@ -547,10 +548,15 @@ These handlers can be queried from Prometheus-compatible clients such as Grafana

### Prometheus querying API enhancements

Additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) VictoriaMetrics accepts relative times in `time`, `start` and `end` query args.
VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg, which can be used for enforcing additional label filters for queries. For example,
`/api/v1/query_range?extra_label=user_id=123&query=<query>` would automatically add `{user_id="123"}` label filter to the given `<query>`. This functionality can be used
for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting
in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.
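As a quick illustration, here is a hedged example of querying with an enforced label filter (it assumes a single-node VictoriaMetrics on `localhost:8428`; the metric name and label are illustrative):

```bash
# Query rate(http_requests_total[5m]) over the last hour,
# restricted to series labeled {user_id="123"}.
curl -G 'http://localhost:8428/api/v1/query_range' \
  --data-urlencode 'query=rate(http_requests_total[5m])' \
  --data-urlencode 'extra_label=user_id=123' \
  --data-urlencode 'start=-1h' \
  --data-urlencode 'step=60s'
```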

VictoriaMetrics accepts relative times in `time`, `start` and `end` query args additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt).
For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`.

By default, VictoriaMetrics returns time series for the last 5 minutes from /api/v1/series, while the Prometheus API defaults to all time. Use `start` and `end` to select a different time range.
By default, VictoriaMetrics returns time series for the last 5 minutes from `/api/v1/series`, while the Prometheus API defaults to all time. Use `start` and `end` to select a different time range.
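A hedged example of widening that window (assuming a single-node instance on `localhost:8428`; the series selector is illustrative):

```bash
# List series matching `up` seen over the last 3 hours
# instead of the default 5 minutes.
curl -G 'http://localhost:8428/api/v1/series' \
  --data-urlencode 'match[]=up' \
  --data-urlencode 'start=-3h'
```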

VictoriaMetrics accepts additional args for `/api/v1/labels` and `/api/v1/label/.../values` handlers.
See [this feature request](https://github.com/prometheus/prometheus/issues/6178) for details:
@ -1353,6 +1359,17 @@ See the example of alerting rules for VM components [here](https://github.com/Vi

* VictoriaMetrics ignores `NaN` values during data ingestion.


## Data migration

Use [vmctl](https://victoriametrics.github.io/vmctl.html) for data migration. It supports the following data migration types:

* From Prometheus to VictoriaMetrics
* From InfluxDB to VictoriaMetrics
* From VictoriaMetrics to VictoriaMetrics

See [vmctl docs](https://victoriametrics.github.io/vmctl.html) for more details.


## Backfilling

VictoriaMetrics accepts historical data in arbitrary order of time via [any supported ingestion method](#how-to-import-time-series-data).
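For instance, here is a hedged backfilling example (assuming a single-node instance on `localhost:8428`; the metric and timestamp are illustrative):

```bash
# Import a sample with an explicit timestamp (in nanoseconds)
# via the Influx line protocol endpoint.
curl -d 'foo,job=test value=42 1577836800000000000' \
  'http://localhost:8428/write'
```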

@ -1420,7 +1437,6 @@ The collected profiles may be analyzed with [go tool pprof](https://github.com/g

* [Helm charts for single-node and cluster versions of VictoriaMetrics](https://github.com/VictoriaMetrics/helm-charts).
* [Kubernetes operator for VictoriaMetrics](https://github.com/VictoriaMetrics/operator).
* [vmctl tool for data migration to VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl).
* [netdata](https://github.com/netdata/netdata) can push data into VictoriaMetrics via `Prometheus remote_write API`.
  See [these docs](https://github.com/netdata/netdata#integrations).
* [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi) can use VictoriaMetrics as time series backend.
@ -7,7 +7,7 @@ or any other Prometheus-compatible storage system that supports the `remote_writ
<img alt="vmagent" src="vmagent.png">


### Motivation
## Motivation

While VictoriaMetrics provides an efficient solution to store and observe metrics, our users needed something fast
and RAM friendly to scrape metrics from Prometheus-compatible exporters to VictoriaMetrics.

@ -15,7 +15,7 @@ Also, we found that users’ infrastructure are snowflakes - no two are alike, a
to `vmagent` (like the ability to push metrics instead of pulling them). We did our best and plan to do even more.


### Features
## Features

* Can be used as drop-in replacement for Prometheus for scraping targets such as [node_exporter](https://github.com/prometheus/node_exporter).
  See [Quick Start](#quick-start) for details.

@ -36,7 +36,7 @@ to `vmagent` (like the ability to push metrics instead of pulling them). We did
* Uses lower amounts of RAM, CPU, disk IO and network bandwidth compared to Prometheus.


### Quick Start
## Quick Start

Just download `vmutils-*` archive from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), unpack it
and pass the following flags to `vmagent` binary in order to start scraping Prometheus targets:

@ -63,7 +63,7 @@ Then send Influx data to `http://vmagent-host:8429`. See [these docs](https://vi
Pass `-help` to `vmagent` in order to see the full list of supported command-line flags with their descriptions.


### Configuration update
## Configuration update

`vmagent` should be restarted in order to update config options set via command-line args.


@ -79,10 +79,10 @@ Pass `-help` to `vmagent` in order to see the full list of supported command-lin
There is also `-promscrape.configCheckInterval` command-line option, which can be used for automatic reloading configs from updated `-promscrape.config` file.


### Use cases
## Use cases


#### IoT and Edge monitoring
### IoT and Edge monitoring

`vmagent` can run and collect metrics in IoT and industrial networks with unreliable or scheduled connections to the remote storage.
It buffers the collected data in local files until the connection to remote storage becomes available and then sends the buffered

@ -93,14 +93,14 @@ The maximum buffer size can be limited with `-remoteWrite.maxDiskUsagePerURL`.
See [the corresponding Makefile rules](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmagent/Makefile) for details.


#### Drop-in replacement for Prometheus
### Drop-in replacement for Prometheus

If you use Prometheus only for scraping metrics from various targets and forwarding these metrics to remote storage,
then `vmagent` can replace such Prometheus setup. Usually `vmagent` requires lower amounts of RAM, CPU and network bandwidth compared to Prometheus for such a setup.
See [these docs](#how-to-collect-metrics-in-prometheus-format) for details.


#### Replication and high availability
### Replication and high availability

`vmagent` replicates the collected metrics among multiple remote storage instances configured via `-remoteWrite.url` args.
If a single remote storage instance is temporarily out of service, then the collected data remains available in the other remote storage instances.

@ -108,14 +108,14 @@ If a single remote storage instance temporarily is out of service, then the coll
Then it sends the buffered data to the remote storage in order to prevent data gaps in the remote storage.


#### Relabeling and filtering
### Relabeling and filtering

`vmagent` can add, remove or update labels on the collected data before sending it to remote storage. Additionally,
it can remove unwanted samples via Prometheus-like relabeling before sending the collected data to remote storage.
See [these docs](#relabeling) for details.


#### Splitting data streams among multiple systems
### Splitting data streams among multiple systems

`vmagent` supports splitting the collected data between multiple destinations with the help of `-remoteWrite.urlRelabelConfig`,
which is applied independently for each configured `-remoteWrite.url` destination. For instance, it is possible to replicate or split

@ -123,7 +123,7 @@ data among long-term remote storage, short-term remote storage and real-time ana
Note that each destination can receive its own subset of the collected data thanks to per-destination relabeling via `-remoteWrite.urlRelabelConfig`.


#### Prometheus remote_write proxy
### Prometheus remote_write proxy

`vmagent` may be used as a proxy for Prometheus data sent via Prometheus `remote_write` protocol. It can accept data via `remote_write` API
at `/api/v1/write` endpoint, apply relabeling and filtering and then proxy it to other `remote_write` systems.

@ -131,12 +131,12 @@ The `vmagent` can be configured to encrypt the incoming `remote_write` requests
Additionally, Basic Auth can be enabled for the incoming `remote_write` requests with `-httpAuth.*` command-line flags.


#### remote_write for clustered version
### remote_write for clustered version

Although `vmagent` can accept data in several supported protocols (OpenTSDB, Influx, Prometheus, Graphite) and scrape data from various targets, writes are always performed in the Prometheus remote_write protocol. Therefore, for the clustered version the `-remoteWrite.url` command-line flag should be configured as `<schema>://<vminsert-host>:8480/insert/<customer-id>/prometheus/api/v1/write`.
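A hedged example of such a setup (the `vminsert` hostname and the accountID of `42` are illustrative):

```bash
# Scrape local targets and write to tenant 42 of a VictoriaMetrics cluster.
/path/to/vmagent-prod \
  -promscrape.config=/path/to/prometheus.yml \
  -remoteWrite.url=http://vminsert-host:8480/insert/42/prometheus/api/v1/write
```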


### How to collect metrics in Prometheus format
## How to collect metrics in Prometheus format

Pass the path to `prometheus.yml` to `-promscrape.config` command-line flag. `vmagent` takes into account the following
sections from [Prometheus config file](https://prometheus.io/docs/prometheus/latest/configuration/configuration/):

@ -192,7 +192,7 @@ entries to 60s. Run `vmagent -help` in order to see default values for `-promscr
The file pointed by `-promscrape.config` may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.


### Adding labels to metrics
## Adding labels to metrics

Labels can be added to metrics via the following mechanisms:


@ -200,7 +200,7 @@ Labels can be added to metrics via the following mechanisms:
* Via `-remoteWrite.label` command-line flag. These labels are added to all the collected metrics before sending them to `-remoteWrite.url`.


### Relabeling
## Relabeling

`vmagent` supports [Prometheus relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config).
Additionally it provides the following extra actions:

@ -227,7 +227,7 @@ Read more about relabeling in the following articles:
* [relabel_configs vs metric_relabel_configs](https://www.robustperception.io/relabel_configs-vs-metric_relabel_configs)


### Monitoring
## Monitoring

`vmagent` exports various metrics in Prometheus exposition format at `http://vmagent-host:8429/metrics` page. It is recommended setting up regular scraping of this page
either via `vmagent` itself or via Prometheus, so the exported metrics could be analyzed later.

@ -246,7 +246,7 @@ This information may be useful for debugging target relabeling.

It may be useful for performing `vmagent` rolling update without scrape loss.


### Troubleshooting
## Troubleshooting

* It is recommended [setting up the official Grafana dashboard](#monitoring) in order to monitor `vmagent` state.


@ -322,24 +322,24 @@ It may be useful for performing `vmagent` rolling update without scrape loss.
```


### How to build from sources
## How to build from sources

It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmagent` is located in `vmutils-*` archives there.


#### Development build
### Development build

1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
2. Run `make vmagent` from the root folder of the repository.
   It builds `vmagent` binary and puts it into the `bin` folder.

#### Production build
### Production build

1. [Install docker](https://docs.docker.com/install/).
2. Run `make vmagent-prod` from the root folder of the repository.
   It builds `vmagent-prod` binary and puts it into the `bin` folder.

#### Building docker images
### Building docker images

Run `make package-vmagent`. It builds `victoriametrics/vmagent:<PKG_TAG>` docker image locally.
`<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository.

@ -352,24 +352,24 @@ by setting it via `<ROOT_IMAGE>` environment variable. For example, the followin
ROOT_IMAGE=scratch make package-vmagent
```

#### ARM build
### ARM build

ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://blog.cloudflare.com/arm-takes-wing/).

#### Development ARM build
### Development ARM build

1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
2. Run `make vmagent-arm` or `make vmagent-arm64` from the root folder of the repository.
   It builds `vmagent-arm` or `vmagent-arm64` binary respectively and puts it into the `bin` folder.

#### Production ARM build
### Production ARM build

1. [Install docker](https://docs.docker.com/install/).
2. Run `make vmagent-arm-prod` or `make vmagent-arm64-prod` from the root folder of the repository.
   It builds `vmagent-arm-prod` or `vmagent-arm64-prod` binary respectively and puts it into the `bin` folder.


### Profiling
## Profiling

`vmagent` provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):

@ -12,6 +12,7 @@ rules against configured address.
  support;
* Integration with [Alertmanager](https://github.com/prometheus/alertmanager);
* Keeps the alerts [state on restarts](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmalert#alerts-state-on-restarts);
* Graphite datasource can be used for alerting and recording rules. See [these docs](#graphite) for details.
* Lightweight without extra dependencies.

### Limitations:

@ -105,7 +106,13 @@ The syntax for alerting rule is following:
# The name of the alert. Must be a valid metric name.
alert: <string>

# The MetricsQL expression to evaluate.
# Optional type for the rule. Supported values: "graphite", "prometheus".
# By default "prometheus" rule type is used.
[ type: <string> ]

# The expression to evaluate. The expression language depends on the type value.
# By default MetricsQL expression is used. If type="graphite", then the expression
# must contain valid Graphite expression.
expr: <string>

# Alerts are considered firing once they have been returned for this long.

@ -128,7 +135,13 @@ The syntax for recording rules is following:
# The name of the time series to output to. Must be a valid metric name.
record: <string>

# The MetricsQL expression to evaluate.
# Optional type for the rule. Supported values: "graphite", "prometheus".
# By default "prometheus" rule type is used.
[ type: <string> ]

# The expression to evaluate. The expression language depends on the type value.
# By default MetricsQL expression is used. If type="graphite", then the expression
# must contain valid Graphite expression.
expr: <string>

# Labels to add or overwrite before storing the result.

@ -166,6 +179,13 @@ Used as alert source in AlertManager.
* `http://<vmalert-addr>/-/reload` - hot configuration reload.


### Graphite

vmalert sends requests to `<-datasource.url>/render?format=json` during evaluation of alerting and recording rules
if the corresponding rule contains `type: "graphite"` config option. It is expected that the `<-datasource.url>/render`
implements [Graphite Render API](https://graphite.readthedocs.io/en/stable/render_api.html) for `format=json`.
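For reference, a hedged sketch of the kind of request vmalert issues for a graphite-type rule (the hostname, port and Graphite target are illustrative):

```bash
# Roughly what vmalert asks the Graphite datasource during rule evaluation.
curl -G 'http://graphite-host:8080/render' \
  --data-urlencode 'target=highestMax(system.load.1min.*,1)' \
  --data-urlencode 'format=json' \
  --data-urlencode 'from=-5min'
```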


### Configuration

The shortlist of configuration flags is the following:

@ -5,7 +5,7 @@ It reads username and password from [Basic Auth headers](https://en.wikipedia.or
matches them against configs pointed by `-auth.config` command-line flag and proxies incoming HTTP requests to the configured per-user `url_prefix` on successful match.


### Quick start
## Quick start

Just download `vmutils-*` archive from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), unpack it
and pass the following flag to `vmauth` binary in order to start authorizing and routing requests:

@ -26,7 +26,7 @@ Pass `-help` to `vmauth` in order to see all the supported command-line flags wi
Feel free [contacting us](mailto:info@victoriametrics.com) if you need customized auth proxy for VictoriaMetrics with the support of LDAP, SSO, RBAC, SAML, accounting, limits, etc.


### Auth config
## Auth config

Auth config is represented in the following simple `yml` format:


@ -68,7 +68,7 @@ The config may contain `%{ENV_VAR}` placeholders, which are substituted by the c
This may be useful for passing secrets to the config.


### Security
## Security

Do not transfer Basic Auth headers in plaintext over untrusted networks. Enable https. This can be done by passing the following `-tls*` command-line flags to `vmauth`:


@ -84,30 +84,30 @@ Do not transfer Basic Auth headers in plaintext over untrusted networks. Enable

Alternatively, [https termination proxy](https://en.wikipedia.org/wiki/TLS_termination_proxy) may be put in front of `vmauth`.


### Monitoring
## Monitoring

`vmauth` exports various metrics in Prometheus exposition format at `http://vmauth-host:8427/metrics` page. It is recommended setting up regular scraping of this page
either via [vmagent](https://victoriametrics.github.io/vmagent.html) or via Prometheus, so the exported metrics could be analyzed later.


### How to build from sources
## How to build from sources

It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmauth` is located in `vmutils-*` archives there.


#### Development build
### Development build

1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
2. Run `make vmauth` from the root folder of the repository.
   It builds `vmauth` binary and puts it into the `bin` folder.

#### Production build
### Production build

1. [Install docker](https://docs.docker.com/install/).
2. Run `make vmauth-prod` from the root folder of the repository.
   It builds `vmauth-prod` binary and puts it into the `bin` folder.

#### Building docker images
### Building docker images

Run `make package-vmauth`. It builds `victoriametrics/vmauth:<PKG_TAG>` docker image locally.
`<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository.

@ -121,7 +121,7 @@ ROOT_IMAGE=scratch make package-vmauth
```


### Profiling
## Profiling

`vmauth` provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):


@ -142,7 +142,7 @@ The command for collecting CPU profile waits for 30 seconds before returning.

The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).


### Advanced usage
## Advanced usage

Pass `-help` command-line arg to `vmauth` in order to see all the configuration options:

@ -23,9 +23,9 @@ See also [vmbackuper](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/
creation of hourly, daily, weekly and monthly backups.


### Use cases
## Use cases

#### Regular backups
### Regular backups

Regular backup can be performed with the following command:


@ -40,7 +40,7 @@ vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshotName=<local-
* `<path/to/new/backup>` is the destination path where new backup will be placed.


#### Regular backups with server-side copy from existing backup
### Regular backups with server-side copy from existing backup

If the destination GCS bucket already contains the previous backup at `-origin` path, then new backup can be sped up
with the following command:

@ -52,7 +52,7 @@ vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshotName=<local-
It saves time and network bandwidth costs by performing server-side copy for the shared data from the `-origin` to `-dst`.


#### Incremental backups
### Incremental backups

Incremental backups are performed if `-dst` points to an already existing backup. In this case only new data is uploaded to remote storage.
It saves time and network bandwidth costs when working with big backups:

@ -62,7 +62,7 @@ vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshotName=<local-
```


#### Smart backups
### Smart backups

Smart backups mean storing full daily backups into `YYYYMMDD` folders and creating incremental hourly backup into `latest` folder:


@ -92,7 +92,7 @@ Do not forget removing old snapshots and backups when they are no longer needed

See also [vmbackuper tool](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/466) for automating smart backups.


### How does it work?
## How does it work?

The backup algorithm is the following:


@ -118,7 +118,7 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-

`vmbackup` can work improperly or slowly when these properties are violated.


### Troubleshooting
## Troubleshooting

* If the backup is slow, then try setting higher value for `-concurrency` flag. This will increase the number of concurrent workers that upload data to backup storage.
* If `vmbackup` eats all the network bandwidth, then set `-maxBytesPerSecond` to the desired value.

@ -127,7 +127,7 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
at [cluster VictoriaMetrics](https://victoriametrics.github.io/Cluster-VictoriaMetrics.html) and vice versa.


### Advanced usage
## Advanced usage


* Obtaining credentials from a file.


@ -222,24 +222,24 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
```


### How to build from sources
## How to build from sources

It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - see `vmutils-*` archives there.


#### Development build
### Development build

1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
2. Run `make vmbackup` from the root folder of the repository.
   It builds `vmbackup` binary and puts it into the `bin` folder.

#### Production build
### Production build

1. [Install docker](https://docs.docker.com/install/).
2. Run `make vmbackup-prod` from the root folder of the repository.
   It builds `vmbackup-prod` binary and puts it into the `bin` folder.

#### Building docker images
### Building docker images

Run `make package-vmbackup`. It builds `victoriametrics/vmbackup:<PKG_TAG>` docker image locally.
`<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository.

473
docs/vmctl.md
Normal file
@ -0,0 +1,473 @@
# vmctl - VictoriaMetrics command-line tool

Features:
- [x] Prometheus: migrate data from Prometheus to VictoriaMetrics using snapshot API
- [x] Thanos: migrate data from Thanos to VictoriaMetrics
- [ ] ~~Prometheus: migrate data from Prometheus to VictoriaMetrics by query~~ (discarded)
- [x] InfluxDB: migrate data from InfluxDB to VictoriaMetrics
- [ ] Storage Management: data re-balancing between nodes

# Table of contents

* [Articles](#articles)
* [How to build](#how-to-build)
* [Migrating data from InfluxDB 1.x](#migrating-data-from-influxdb-1x)
  * [Data mapping](#data-mapping)
  * [Configuration](#configuration)
  * [Filtering](#filtering)
* [Migrating data from InfluxDB 2.x](#migrating-data-from-influxdb-2x)
* [Migrating data from Prometheus](#migrating-data-from-prometheus)
  * [Data mapping](#data-mapping-1)
  * [Configuration](#configuration-1)
  * [Filtering](#filtering-1)
* [Migrating data from Thanos](#migrating-data-from-thanos)
  * [Current data](#current-data)
  * [Historical data](#historical-data)
* [Migrating data from VictoriaMetrics](#migrating-data-from-victoriametrics)
  * [Native protocol](#native-protocol)
* [Tuning](#tuning)
  * [Influx mode](#influx-mode)
  * [Prometheus mode](#prometheus-mode)
  * [VictoriaMetrics importer](#victoriametrics-importer)
  * [Importer stats](#importer-stats)
  * [Significant figures](#significant-figures)
  * [Adding extra labels](#adding-extra-labels)


## Articles

* [How to migrate data from Prometheus](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-d44a6728f043)
* [How to migrate data from Prometheus. Filtering and modifying time series](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-filtering-and-modifying-time-series-6d40cea4bf21)

## How to build

It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmctl` is located in `vmutils-*` archives there.


### Development build

1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
2. Run `make vmctl` from the root folder of the repository.
   It builds `vmctl` binary and puts it into the `bin` folder.

### Production build

1. [Install docker](https://docs.docker.com/install/).
2. Run `make vmctl-prod` from the root folder of the repository.
   It builds `vmctl-prod` binary and puts it into the `bin` folder.

### Building docker images

Run `make package-vmctl`. It builds `victoriametrics/vmctl:<PKG_TAG>` docker image locally.
`<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository.
The `<PKG_TAG>` may be manually set via `PKG_TAG=foobar make package-vmctl`.

The base docker image is [alpine](https://hub.docker.com/_/alpine) but it is possible to use any other base image
by setting it via `<ROOT_IMAGE>` environment variable. For example, the following command builds the image on top of [scratch](https://hub.docker.com/_/scratch) image:

```bash
ROOT_IMAGE=scratch make package-vmctl
```

### ARM build

ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://blog.cloudflare.com/arm-takes-wing/).

#### Development ARM build

1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
2. Run `make vmctl-arm` or `make vmctl-arm64` from the root folder of the repository.
   It builds `vmctl-arm` or `vmctl-arm64` binary respectively and puts it into the `bin` folder.

#### Production ARM build

1. [Install docker](https://docs.docker.com/install/).
2. Run `make vmctl-arm-prod` or `make vmctl-arm64-prod` from the root folder of the repository.
   It builds `vmctl-arm-prod` or `vmctl-arm64-prod` binary respectively and puts it into the `bin` folder.

## Migrating data from InfluxDB (1.x)

`vmctl` supports the `influx` mode to migrate data from InfluxDB to VictoriaMetrics time-series database.

See `./vmctl influx --help` for details and full list of flags.

To use migration tool please specify the InfluxDB address `--influx-addr`, the database `--influx-database` and VictoriaMetrics address `--vm-addr`.
Flag `--vm-addr` for single-node VM is usually equal to `--httpListenAddr`, and for cluster version
is equal to `--httpListenAddr` flag of VMInsert component. Please note that vmctl performs an initial readiness check for the given address
by checking the `/health` endpoint. For cluster version it is additionally required to specify the `--vm-account-id` flag.
See more details for cluster version [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).

As soon as required flags are provided and all endpoints are accessible, `vmctl` will start the InfluxDB scheme exploration.
Basically, it just fetches all fields and timeseries from the provided database and builds up a registry of all available timeseries.
Then `vmctl` sends fetch requests for each timeseries to InfluxDB one by one and passes the results to the VM importer.
The VM importer then accumulates received samples in batches and sends import requests to VM.

The importing process example for local installation of InfluxDB(`http://localhost:8086`)
and single-node VictoriaMetrics(`http://localhost:8428`):
```
./vmctl influx --influx-database benchmark
InfluxDB import mode
2020/01/18 20:47:11 Exploring scheme for database "benchmark"
2020/01/18 20:47:11 fetching fields: command: "show field keys"; database: "benchmark"; retention: "autogen"
2020/01/18 20:47:11 found 10 fields
2020/01/18 20:47:11 fetching series: command: "show series "; database: "benchmark"; retention: "autogen"
Found 40000 timeseries to import. Continue? [Y/n] y
40000 / 40000 [--------------------------------------------------------------] 100.00% 21 p/s
2020/01/18 21:19:00 Import finished!
2020/01/18 21:19:00 VictoriaMetrics importer stats:
  idle duration: 13m51.461434876s;
  time spent while importing: 17m56.923899847s;
  total samples: 345600000;
  samples/s: 320914.04;
  total bytes: 5.9 GB;
  bytes/s: 5.4 MB;
  import requests: 40001;
2020/01/18 21:19:00 Total time: 31m48.467044016s
```

### Data mapping

Vmctl maps Influx data the same way as VictoriaMetrics does by using the following rules:

* `influx-database` arg is mapped into `db` label value unless `db` tag exists in the Influx line.
* Field names are mapped to time series names prefixed with the `{measurement}{separator}` value,
where `{separator}` equals to `_` by default.
It can be changed with the `--influx-measurement-field-separator` command-line flag.
* Field values are mapped to time series values.
* Tags are mapped to Prometheus labels format as-is.

For example, the following Influx line:
```
foo,tag1=value1,tag2=value2 field1=12,field2=40
```

is converted into the following Prometheus format data points:
```
foo_field1{tag1="value1", tag2="value2"} 12
foo_field2{tag1="value1", tag2="value2"} 40
```

### Configuration

The configuration flags should contain self-explanatory descriptions.

### Filtering

The filtering consists of two parts: by timeseries and by time.
The first step is to select all available timeseries
for the given database and retention. The user may specify an additional filtering
condition via the `--influx-filter-series` flag. For example:
```
./vmctl influx --influx-database benchmark \
  --influx-filter-series "on benchmark from cpu where hostname='host_1703'"
InfluxDB import mode
2020/01/26 14:23:29 Exploring scheme for database "benchmark"
2020/01/26 14:23:29 fetching fields: command: "show field keys"; database: "benchmark"; retention: "autogen"
2020/01/26 14:23:29 found 12 fields
2020/01/26 14:23:29 fetching series: command: "show series on benchmark from cpu where hostname='host_1703'"; database: "benchmark"; retention: "autogen"
Found 10 timeseries to import. Continue? [Y/n]
```
The timeseries select query would be the following:
`fetching series: command: "show series on benchmark from cpu where hostname='host_1703'"; database: "benchmark"; retention: "autogen"`

The second step of filtering is a time filter and it applies when fetching the datapoints from Influx.
Time filtering may be configured with two flags:
* `--influx-filter-time-start`
* `--influx-filter-time-end`

Here's an example of importing timeseries for one day only:
`./vmctl influx --influx-database benchmark --influx-filter-series "where hostname='host_1703'" --influx-filter-time-start "2020-01-01T10:07:00Z" --influx-filter-time-end "2020-01-01T15:07:00Z"`

Please see more about time filtering [here](https://docs.influxdata.com/influxdb/v1.7/query_language/schema_exploration#filter-meta-queries-by-time).

## Migrating data from InfluxDB (2.x)

Migrating data from InfluxDB v2.x is not supported yet ([#32](https://github.com/VictoriaMetrics/vmctl/issues/32)).
You may find a 3rd party solution useful for this - https://github.com/jonppe/influx_to_victoriametrics.

## Migrating data from Prometheus

`vmctl` supports the `prometheus` mode for migrating data from Prometheus to VictoriaMetrics time-series database.
Migration is based on reading a Prometheus snapshot, which is basically a hard-link to Prometheus data files.

See `./vmctl prometheus --help` for details and full list of flags.

To use migration tool please specify the path to Prometheus snapshot `--prom-snapshot` and VictoriaMetrics address `--vm-addr`.
More about Prometheus snapshots may be found [here](https://www.robustperception.io/taking-snapshots-of-prometheus-data).
Flag `--vm-addr` for single-node VM is usually equal to `--httpListenAddr`, and for cluster version
is equal to `--httpListenAddr` flag of VMInsert component. Please note that vmctl performs an initial readiness check for the given address
by checking the `/health` endpoint. For cluster version it is additionally required to specify the `--vm-account-id` flag.
See more details for cluster version [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).

As soon as required flags are provided and all endpoints are accessible, `vmctl` will start the Prometheus snapshot exploration.
Basically, it just fetches all available blocks in the provided snapshot and reads the metadata. It also does initial filtering by time
if flags `--prom-filter-time-start` or `--prom-filter-time-end` were set. The exploration procedure prints some stats from the read blocks.
Please note that the stats do not take timeseries or samples filtering into account. This will be done during the importing process.

The importing process takes the snapshot blocks revealed by the Explore procedure and processes them one by one,
accumulating timeseries and samples. Please note that `vmctl` relies on responses from Influx at this stage,
so ensure that Explore queries are executed without errors or limits. Please see this
[issue](https://github.com/VictoriaMetrics/vmctl/issues/30) for details.
The data is processed in chunks and then sent to VM.

The importing process example for local installation of Prometheus
and single-node VictoriaMetrics(`http://localhost:8428`):
```
./vmctl prometheus --prom-snapshot=/path/to/snapshot \
  --vm-concurrency=1 \
  --vm-batch-size=200000 \
  --prom-concurrency=3
Prometheus import mode
Prometheus snapshot stats:
  blocks found: 14;
  blocks skipped: 0;
  min time: 1581288163058 (2020-02-09T22:42:43Z);
  max time: 1582409128139 (2020-02-22T22:05:28Z);
  samples: 32549106;
  series: 27289.
Found 14 blocks to import. Continue? [Y/n] y
14 / 14 [--------------------------------------------------------------] 100.00% 0 p/s
2020/02/23 15:50:03 Import finished!
2020/02/23 15:50:03 VictoriaMetrics importer stats:
  idle duration: 6.152953029s;
  time spent while importing: 44.908522491s;
  total samples: 32549106;
  samples/s: 724786.84;
  total bytes: 669.1 MB;
  bytes/s: 14.9 MB;
  import requests: 323;
  import requests retries: 0;
2020/02/23 15:50:03 Total time: 51.077451066s
```

### Data mapping

VictoriaMetrics has a very similar data model to Prometheus and supports [RemoteWrite integration](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
So no data changes will be applied.

### Configuration

The configuration flags should contain self-explanatory descriptions.

### Filtering

The filtering consists of two parts: by timeseries and by time.

Filtering by time may be configured via flags `--prom-filter-time-start` and `--prom-filter-time-end`
in RFC3339 format. This filter is applied twice: to drop blocks out of range and to filter timeseries in blocks with
overlapping time range.

Example of applying a time filter:
```
./vmctl prometheus --prom-snapshot=/path/to/snapshot \
  --prom-filter-time-start=2020-02-07T00:07:01Z \
  --prom-filter-time-end=2020-02-11T00:07:01Z
Prometheus import mode
Prometheus snapshot stats:
  blocks found: 2;
  blocks skipped: 12;
  min time: 1581288163058 (2020-02-09T22:42:43Z);
  max time: 1581328800000 (2020-02-10T10:00:00Z);
  samples: 1657698;
  series: 3930.
Found 2 blocks to import. Continue? [Y/n] y
```

Please notice that the total amount of blocks in the provided snapshot is 14, but only 2 of them were in the provided
time range. The other 12 blocks were marked as `skipped`. The amount of samples and series is not taken into account,
since this is a heavy operation and will be done during the import process.


Filtering by timeseries is configured with the following flags:
* `--prom-filter-label` - the label name, e.g. `__name__` or `instance`;
* `--prom-filter-label-value` - the regular expression to filter the label value. By default matches all (`.*`).

For example:
```
./vmctl prometheus --prom-snapshot=/path/to/snapshot \
  --prom-filter-label="__name__" \
  --prom-filter-label-value="promhttp.*" \
  --prom-filter-time-start=2020-02-07T00:07:01Z \
  --prom-filter-time-end=2020-02-11T00:07:01Z
Prometheus import mode
Prometheus snapshot stats:
  blocks found: 2;
  blocks skipped: 12;
  min time: 1581288163058 (2020-02-09T22:42:43Z);
  max time: 1581328800000 (2020-02-10T10:00:00Z);
  samples: 1657698;
  series: 3930.
Found 2 blocks to import. Continue? [Y/n] y
14 / 14 [--------------------------------------------------------------] 100.00% ? p/s
2020/02/23 15:51:07 Import finished!
2020/02/23 15:51:07 VictoriaMetrics importer stats:
  idle duration: 0s;
  time spent while importing: 37.415461ms;
  total samples: 10128;
  samples/s: 270690.24;
  total bytes: 195.2 kB;
  bytes/s: 5.2 MB;
  import requests: 2;
  import requests retries: 0;
2020/02/23 15:51:07 Total time: 7.153158218s
```

## Migrating data from Thanos

Thanos uses the same storage engine as Prometheus and the data layout on-disk should be the same. That means
`vmctl` in mode `prometheus` may be used for Thanos historical data migration as well.
These instructions may vary based on the details of your Thanos configuration.
Please read carefully and verify as you go. We assume you're using Thanos Sidecar on your Prometheus pods,
and that you have a separate Thanos Store installation.

### Current data

1. For now, keep your Thanos Sidecar and Thanos-related Prometheus configuration, but add this to also stream
metrics to VictoriaMetrics:
    ```
    remote_write:
    - url: http://victoria-metrics:8428/api/v1/write
    ```
2. Make sure VM is running, of course. Now check the logs to make sure that Prometheus is sending and VM is receiving.
In Prometheus, make sure there are no errors. On the VM side, you should see messages like this:
    ```
    2020-04-27T18:38:46.474Z info VictoriaMetrics/lib/storage/partition.go:207 creating a partition "2020_04" with smallPartsPath="/victoria-metrics-data/data/small/2020_04", bigPartsPath="/victoria-metrics-data/data/big/2020_04"
    2020-04-27T18:38:46.506Z info VictoriaMetrics/lib/storage/partition.go:222 partition "2020_04" has been created
    ```
3. Now just wait. Within two hours, Prometheus should finish its current data file and hand it off to Thanos Store for long term
storage.

### Historical data

Let's assume your data is stored on S3 served by minio. You first need to copy that out to a local filesystem,
then import it into VM using `vmctl` in `prometheus` mode.
1. Copy data from minio.
    1. Run the `minio/mc` Docker container.
    1. `mc config host add minio http://minio:9000 accessKey secretKey`, substituting appropriate values for the last 3 items.
    1. `mc cp -r minio/prometheus thanos-data`
1. Import using `vmctl`.
    1. Follow the [instructions](#how-to-build) to compile `vmctl` on your machine.
    1. Use [prometheus](#migrating-data-from-prometheus) mode to import data:
    ```
    vmctl prometheus --prom-snapshot thanos-data --vm-addr http://victoria-metrics:8428
    ```

## Migrating data from VictoriaMetrics

### Native protocol

The [native binary protocol](https://victoriametrics.github.io/#how-to-export-data-in-native-format)
was introduced in [1.42.0 release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.42.0)
and provides the most efficient way to migrate data between VM instances: single to single, cluster to cluster,
single to cluster and vice versa. Please note that both instances (source and destination) should be of v1.42.0
or higher.

See `./vmctl vm-native --help` for details and full list of flags.

In this mode `vmctl` acts as a proxy between two VM instances, where time series filtering is done by the "source" (`src`)
and processing is done by the "destination" (`dst`). Because of that, `vmctl` doesn't actually know how much data will be
processed and can't show the progress bar. It will show the current processing speed and total number of processed bytes:

```
./vmctl vm-native --vm-native-src-addr=http://localhost:8528 \
  --vm-native-dst-addr=http://localhost:8428 \
  --vm-native-filter-match='{job="vmagent"}' \
  --vm-native-filter-time-start='2020-01-01T20:07:00Z'
VictoriaMetrics Native import mode
Initing export pipe from "http://localhost:8528" with filters:
        filter: match[]={job="vmagent"}
Initing import process to "http://localhost:8428":
Total: 336.75 KiB ↖ Speed: 454.46 KiB p/s
2020/10/13 17:04:59 Total time: 952.143376ms
```

Importing tips:
1. Migrating all the metrics from one VM to another may collide with existing application metrics
(prefixed with `vm_`) at destination and lead to confusion when using
[official Grafana dashboards](https://grafana.com/orgs/victoriametrics/dashboards).
To avoid such a situation try to filter out VM process metrics via `--vm-native-filter-match` flag.
2. Migration is a backfilling process, so it is recommended to read
[Backfilling tips](https://github.com/VictoriaMetrics/VictoriaMetrics#backfilling) section.
3. `vmctl` doesn't provide relabeling or other types of labels management in this mode.
Instead, use [relabeling in VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl/issues/4#issuecomment-683424375).


## Tuning

### Influx mode

The flag `--influx-concurrency` controls how many concurrent requests may be sent to InfluxDB while fetching
timeseries. Please set it wisely to avoid overwhelming InfluxDB.

The flag `--influx-chunk-size` controls the max amount of datapoints to return in a single chunk from fetch requests.
Please see more details [here](https://docs.influxdata.com/influxdb/v1.7/guides/querying_data/#chunking).
The chunk size is used to control InfluxDB memory usage, so it won't OOM on processing large timeseries with
billions of datapoints.

### Prometheus mode

The flag `--prom-concurrency` controls how many concurrent readers will be reading the blocks in the snapshot.
Since snapshots are just files on disk it would be hard to overwhelm the system. Please go with a value equal
to the number of free CPU cores.

### VictoriaMetrics importer

The flag `--vm-concurrency` controls the number of concurrent workers that process the input from InfluxDB query results.
Please note that each import request can load up to a single vCPU core on VictoriaMetrics. So try to set it according
to the allocated CPU resources of your VictoriaMetrics installation.

The flag `--vm-batch-size` controls the max amount of samples collected before sending the import request.
For example, if `--influx-chunk-size=500` and `--vm-batch-size=2000` then the importer will process not more
than 4 chunks before sending the request.
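Putting these flags together, here is a hedged example of a tuned influx-mode run (the database name and flag values are illustrative, sized for a VictoriaMetrics instance with 4 allocated vCPUs):

```bash
# Fetch with 4 concurrent Influx readers in 500-point chunks,
# and import with 4 workers in 200k-sample batches.
./vmctl influx --influx-database benchmark \
  --influx-concurrency=4 \
  --influx-chunk-size=500 \
  --vm-concurrency=4 \
  --vm-batch-size=200000
```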

### Importer stats

After a successful import, `vmctl` prints some statistics. The important numbers to watch are the following:

- `idle duration` - shows the time the importer spent waiting for data from InfluxDB/Prometheus
  to fill up the `--vm-batch-size` batch size. The value shows the total duration across all workers configured
  via `--vm-concurrency`. A high value may be a sign of too-slow InfluxDB/Prometheus fetches or a too-high
  `--vm-concurrency` value. Try to improve it by increasing the `--<mode>-concurrency` value or
  decreasing the `--vm-concurrency` value.
- `import requests` - shows how many import requests were issued to the VM server.
  An import request is issued once the batch size (`--vm-batch-size`) is full and ready to be sent.
  Please prefer big batch sizes (50k-500k) to improve performance.
- `import requests retries` - shows the number of unsuccessful import requests. A non-zero value may be
  a sign of network issues or of VM being overloaded. See the logs during import for error messages.
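
For example, if the printed `idle duration` dominates the total time, shifting concurrency from the importer to the fetcher may help; the invocation below is illustrative only:

```
# Illustrative rebalancing: fetch harder, import with fewer workers.
./vmctl influx \
  --influx-addr=http://localhost:8086 \
  --influx-database=benchmark \
  --influx-concurrency=8 \
  --vm-concurrency=2
```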

### Silent mode

By default `vmctl` waits for confirmation from the user before starting the import. If this
behavior is unwanted and no user interaction is required, pass the `-s` flag to enable "silent" mode:
```
-s Whether to run in silent mode. If set to true no confirmation prompts will appear. (default: false)
```
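
For instance, in a non-interactive migration script; the rest of the invocation is illustrative, and depending on the CLI parser version the flag may need to precede the mode name:

```
# Illustrative non-interactive run; -s suppresses confirmation prompts.
./vmctl influx \
  --influx-addr=http://localhost:8086 \
  --influx-database=benchmark \
  -s
```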

### Significant figures

`vmctl` allows limiting the number of [significant figures](https://en.wikipedia.org/wiki/Significant_figures)
before importing. For example, the average value for response size is `102.342305` bytes and it has 9 significant figures.
If you ask a human to pronounce this value, it will most likely be rounded to the first 4 or 5 figures,
because the rest aren't really that important to mention. In most cases, such high precision is too much.
Moreover, such values may be just a result of [floating point arithmetic](https://en.wikipedia.org/wiki/Floating-point_arithmetic),
create a [false precision](https://en.wikipedia.org/wiki/False_precision) and result in a bad compression ratio
according to [information theory](https://en.wikipedia.org/wiki/Information_theory).

`vmctl` provides the following flags for improving data compression:

* `--vm-round-digits` flag for rounding processed values to the given number of decimal digits after the point.
  For example, `--vm-round-digits=2` would round `1.2345` to `1.23`. By default the rounding is disabled.

* `--vm-significant-figures` flag for limiting the number of significant figures in processed values. It has no effect if set
  to 0 (the default), but with `--vm-significant-figures=5`, `102.342305` is rounded to `102.34`.

The most common case for using these flags is to improve data compression for time series storing aggregation
results such as `average`, `rate`, etc.
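
A sketch applying significant-figures limiting to an InfluxDB migration; the connection flags are illustrative:

```
# Illustrative endpoints; --vm-significant-figures=5 rounds 102.342305 to 102.34.
./vmctl influx \
  --influx-addr=http://localhost:8086 \
  --influx-database=benchmark \
  --vm-significant-figures=5
```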

### Adding extra labels

`vmctl` allows adding extra labels to all imported series. This can be achieved with the flag `--vm-extra-label label=value`.
If multiple labels need to be added, set the flag once per label, for example, `--vm-extra-label label1=value1 --vm-extra-label label2=value2`.
If a time series already has a label that is also set via `--vm-extra-label`, the flag takes priority and overrides the label value from the time series.
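
A sketch adding two extra labels to every imported series; the mode, connection flags and label values are illustrative:

```
# Illustrative invocation; each --vm-extra-label adds one label to all series.
./vmctl influx \
  --influx-addr=http://localhost:8086 \
  --influx-database=benchmark \
  --vm-extra-label env=prod \
  --vm-extra-label dc=eu-west
```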

@@ -7,7 +7,7 @@ Restore process can be interrupted at any time. It is automatically resumed from
when restarting `vmrestore` with the same args.

### Usage
## Usage

VictoriaMetrics must be stopped during the restore process.

@@ -25,13 +25,13 @@ The original `-storageDataPath` directory may contain old files. They will be su
i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/questions/476041/how-do-i-make-rsync-delete-files-that-have-been-deleted-from-the-source-folder).

### Troubleshooting
## Troubleshooting

* If `vmrestore` eats all the network bandwidth, then set `-maxBytesPerSecond` to the desired value.
* If `vmrestore` has been interrupted due to temporary error, then just restart it with the same args. It will resume the restore process.

### Advanced usage
## Advanced usage

* Obtaining credentials from a file.

@@ -118,24 +118,24 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q
```

### How to build from sources
## How to build from sources

It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - see `vmutils-*` archives there.

#### Development build
### Development build

1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
2. Run `make vmrestore` from the root folder of the repository.
   It builds `vmrestore` binary and puts it into the `bin` folder.

#### Production build
### Production build

1. [Install docker](https://docs.docker.com/install/).
2. Run `make vmrestore-prod` from the root folder of the repository.
   It builds `vmrestore-prod` binary and puts it into the `bin` folder.

#### Building docker images
### Building docker images

Run `make package-vmrestore`. It builds `victoriametrics/vmrestore:<PKG_TAG>` docker image locally.
`<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository.

28
go.mod

@@ -10,29 +10,39 @@ require (
	github.com/VictoriaMetrics/fasthttp v1.0.12
	github.com/VictoriaMetrics/metrics v1.12.3
	github.com/VictoriaMetrics/metricsql v0.10.0
	github.com/aws/aws-sdk-go v1.36.25
	github.com/aws/aws-sdk-go v1.37.1
	github.com/cespare/xxhash/v2 v2.1.1
	github.com/cheggaaa/pb/v3 v3.0.5
	github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
	github.com/fatih/color v1.10.0 // indirect
	github.com/go-kit/kit v0.10.0
	github.com/golang/snappy v0.0.2
	github.com/klauspost/compress v1.11.6
	github.com/influxdata/influxdb v1.8.4
	github.com/klauspost/compress v1.11.7
	github.com/mattn/go-runewidth v0.0.10 // indirect
	github.com/oklog/ulid v1.3.1
	github.com/prometheus/client_golang v1.9.0 // indirect
	github.com/prometheus/procfs v0.3.0 // indirect
	github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
	github.com/rivo/uniseg v0.2.0 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/urfave/cli/v2 v2.3.0
	github.com/valyala/fastjson v1.6.3
	github.com/valyala/fastrand v1.0.0
	github.com/valyala/fasttemplate v1.2.1
	github.com/valyala/gozstd v1.9.0
	github.com/valyala/histogram v1.1.2
	github.com/valyala/quicktemplate v1.6.3
	golang.org/x/oauth2 v0.0.0-20210112200429-01de73cf58bd
	golang.org/x/sync v0.0.0-20201207232520-09787c993a3a // indirect
	golang.org/x/sys v0.0.0-20210113000019-eaf3bda374d2
	go.opencensus.io v0.22.6 // indirect
	golang.org/x/mod v0.4.1 // indirect
	golang.org/x/net v0.0.0-20210119194325-5f4716e94777 // indirect
	golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c
	golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
	golang.org/x/text v0.3.5 // indirect
	golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064 // indirect
	google.golang.org/api v0.36.0
	google.golang.org/genproto v0.0.0-20210111234610-22ae2b108f89 // indirect
	google.golang.org/grpc v1.34.1 // indirect
	golang.org/x/tools v0.1.0 // indirect
	google.golang.org/api v0.38.0
	google.golang.org/genproto v0.0.0-20210201151548-94839c025ad4 // indirect
	google.golang.org/grpc v1.35.0 // indirect
	gopkg.in/yaml.v2 v2.4.0
)

77
go.sum

@@ -17,6 +17,7 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.66.0/go.mod h1:dgqGAjKCDxyhGTtC9dAREQGUJpkceNm1yt590Qno0Ko=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.75.0 h1:XgtDnVJRCPEUG21gjFiRPz4zI1Mjg16R+NYQjfmU4XY=
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=

@@ -88,6 +89,8 @@ github.com/VictoriaMetrics/metrics v1.12.3 h1:Fe6JHC6MSEKa+BtLhPN8WIvS+HKPzMc2ev
github.com/VictoriaMetrics/metrics v1.12.3/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
github.com/VictoriaMetrics/metricsql v0.10.0 h1:45BARAP2shaL/5p67Hvz+YrWUbr0X0VCy9t+gvdIm8o=
github.com/VictoriaMetrics/metricsql v0.10.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=

@@ -120,8 +123,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.36.25 h1:foHwQg8LGGuR9L8IODs2co5OQqjYhNNrngefIbXbyjg=
github.com/aws/aws-sdk-go v1.36.25/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.37.1 h1:BTHmuN+gzhxkvU9sac2tZvaY0gV9ihbHw+KxZOecYvY=
github.com/aws/aws-sdk-go v1.37.1/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=

@@ -139,6 +142,8 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb/v3 v3.0.5 h1:lmZOti7CraK9RSjzExsY53+WWfub9Qv13B5m4ptEoPE=
github.com/cheggaaa/pb/v3 v3.0.5/go.mod h1:X1L61/+36nz9bjIsrDU52qHKOQukUQe2Ge+YvGuquCw=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=

@@ -148,6 +153,7 @@ github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=

@@ -155,6 +161,8 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=

@@ -185,10 +193,13 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=

@@ -368,6 +379,7 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=

@@ -388,6 +400,7 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201117184057-ae444373da19/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=

@@ -451,6 +464,8 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
github.com/influxdata/influxdb v1.8.4 h1:FUcPJJ1/sM47gX3Xr7QCIbkmZ2N4ug5CV7iOFQCR75A=
github.com/influxdata/influxdb v1.8.4/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=

@@ -492,8 +507,8 @@ github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.6 h1:EgWPCW6O3n1D5n99Zq3xXBt9uCwRGvpwGOusOLNBRSQ=
github.com/klauspost/compress v1.11.6/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg=
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=

@@ -525,16 +540,23 @@ github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg=
github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=

@@ -664,6 +686,9 @@ github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9 h1:F2A86PG
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9/go.mod h1:1MDE/bXgu4gqd5w/otko6WQpXZX9vu8QX4KbitCmaPg=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=

@@ -671,6 +696,8 @@ github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=

@@ -721,7 +748,10 @@ github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqri
github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA=

@@ -764,8 +794,9 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.22.6 h1:BdkrbWrzDlV9dnbzoP7sfN+dHheJ4J9JOaYxcUDL+ok=
go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=

@@ -831,8 +862,9 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0 h1:8pl+sMODzuvGJkmj2W4kZihvVb5mKm8pB/X44PIQHv8=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=

@@ -876,8 +908,10 @@ golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

@@ -886,8 +920,8 @@ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210112200429-01de73cf58bd h1:0n2rzLq6xLtV9OFaT0BF2syUkjOwRrJ1zvXY5hH7Kkc=
golang.org/x/oauth2 v0.0.0-20210112200429-01de73cf58bd/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c h1:HiAZXo96zOhVhtFHchj/ojzoxCFiPrp9/j0GtS38V3g=
golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

@@ -963,8 +997,10 @@ golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210113000019-eaf3bda374d2 h1:F9vNgpIiamoF+Q1/c78bikg/NScXEtbZSNEpnRelOzs=
golang.org/x/sys v0.0.0-20210113000019-eaf3bda374d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

@@ -1048,9 +1084,11 @@ golang.org/x/tools v0.0.0-20200918232735-d647fc253266/go.mod h1:z6u4i615ZeAfBE4X
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201119054027-25dc3e1ccc3c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064 h1:BmCFkEH4nJrYcAc2L08yX5RhYGD4j58PTMkEUDkpz2I=
golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

@@ -1082,8 +1120,9 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513
google.golang.org/api v0.31.0/go.mod h1:CL+9IBCa2WWU6gRuBWaKqGWLFFwbEUXkfeMkHLQWYWo=
google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0 h1:l2Nfbl2GPXdWorv+dT2XfinX2jOOw4zv1VhLstx+6rE=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.38.0 h1:vDyWk6eup8eQAidaZ31sNWIn8tZEL8qpbtGkBD4ytQo=
google.golang.org/api v0.38.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=

@@ -1132,9 +1171,11 @@ google.golang.org/genproto v0.0.0-20200914193844-75d14daec038/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200921151605-7abf4a1a14d5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210111234610-22ae2b108f89 h1:R2owLnwrU3BdTJ5R9cnHDNsnEmBQ7n5lZjKShnbISe4=
google.golang.org/genproto v0.0.0-20210111234610-22ae2b108f89/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210201151548-94839c025ad4 h1:HPkKL4eEh/nemF/FRzYMrFsAh1ZPm5t8NqKBI/Ejlg0=
google.golang.org/genproto v0.0.0-20210201151548-94839c025ad4/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=

@@ -1157,8 +1198,8 @@ google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.34.1 h1:ugq+9++ZQPFzM2pKUMCIK8gj9M0pFyuUWO9Q8kwEDQw=
google.golang.org/grpc v1.34.1/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0 h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

@@ -298,8 +298,21 @@ func maxUpExponent(v int64) int16 {
	}
}

// Round f to value with the given number of significant figures.
func Round(f float64, digits int) float64 {
// RoundToDecimalDigits rounds f to the given number of decimal digits after the point.
//
// See also RoundToSignificantFigures.
func RoundToDecimalDigits(f float64, digits int) float64 {
	if digits <= -100 || digits >= 100 {
		return f
	}
	m := math.Pow10(digits)
	return math.Round(f*m) / m
}

// RoundToSignificantFigures rounds f to value with the given number of significant figures.
//
// See also RoundToDecimalDigits.
func RoundToSignificantFigures(f float64, digits int) float64 {
	if digits <= 0 || digits >= 18 {
		return f
	}

@@ -7,10 +7,34 @@ import (
	"testing"
)

func TestRoundInplace(t *testing.T) {
func TestRoundToDecimalDigits(t *testing.T) {
	f := func(f float64, digits int, resultExpected float64) {
		t.Helper()
		result := Round(f, digits)
		result := RoundToDecimalDigits(f, digits)
		if math.IsNaN(result) {
			if !math.IsNaN(resultExpected) {
				t.Fatalf("unexpected result; got %v; want %v", result, resultExpected)
			}
		}
		if result != resultExpected {
			t.Fatalf("unexpected result; got %v; want %v", result, resultExpected)
		}
	}
	f(12.34, 0, 12)
	f(12.57, 0, 13)
	f(-1.578, 2, -1.58)
	f(-1.578, 3, -1.578)
	f(1234, -2, 1200)
	f(1235, -1, 1240)
	f(1234, 0, 1234)
	f(1234.6, 0, 1235)
	f(123.4e-99, 99, 123e-99)
}

func TestRoundToSignificantFigures(t *testing.T) {
	f := func(f float64, digits int, resultExpected float64) {
		t.Helper()
		result := RoundToSignificantFigures(f, digits)
		if math.IsNaN(result) {
			if !math.IsNaN(resultExpected) {
				t.Fatalf("unexpected result; got %v; want %v", result, resultExpected)

@@ -35,6 +35,15 @@ func NewArrayBool(name, description string) *ArrayBool {
	return &a
}

// NewArrayInt returns new ArrayInt with the given name and description.
func NewArrayInt(name string, description string) *ArrayInt {
	description += "\nSupports `array` of values separated by comma" +
		" or specified via multiple flags."
	var a ArrayInt
	flag.Var(&a, name, description)
	return &a
}

// Array is a flag that holds an array of values.
//
// It may be set either by specifying multiple flags with the given name

@@ -223,3 +232,41 @@ func (a *ArrayDuration) GetOptionalArgOrDefault(argIdx int, defaultValue time.Du
	}
	return x[argIdx]
}

// ArrayInt is flag that holds an array of ints.
type ArrayInt []int

// String implements flag.Value interface
func (a *ArrayInt) String() string {
	x := *a
	formattedInts := make([]string, len(x))
	for i, v := range x {
		formattedInts[i] = strconv.Itoa(v)
	}
	return strings.Join(formattedInts, ",")
}

// Set implements flag.Value interface
func (a *ArrayInt) Set(value string) error {
	values := parseArrayValues(value)
	for _, v := range values {
		n, err := strconv.Atoi(v)
		if err != nil {
			return err
		}
		*a = append(*a, n)
	}
	return nil
}

// GetOptionalArgOrDefault returns optional arg under the given argIdx.
func (a *ArrayInt) GetOptionalArgOrDefault(argIdx int, defaultValue int) int {
	x := *a
	if argIdx < len(x) {
		return x[argIdx]
	}
	if len(x) == 1 {
		return x[0]
	}
	return defaultValue
}

@@ -12,14 +12,18 @@ var (
	fooFlag Array
	fooFlagDuration ArrayDuration
	fooFlagBool ArrayBool
	fooFlagInt ArrayInt
)

func init() {
	os.Args = append(os.Args, "--fooFlag=foo", "--fooFlag=bar", "--fooFlagDuration=10s", "--fooFlagDuration=5m")
	os.Args = append(os.Args, "--fooFlag=foo", "--fooFlag=bar")
	os.Args = append(os.Args, "--fooFlagDuration=10s", "--fooFlagDuration=5m")
	os.Args = append(os.Args, "--fooFlagBool=true", "--fooFlagBool=false,true", "--fooFlagBool")
	os.Args = append(os.Args, "--fooFlagInt=1", "--fooFlagInt=2,3")
	flag.Var(&fooFlag, "fooFlag", "test")
	flag.Var(&fooFlagDuration, "fooFlagDuration", "test")
	flag.Var(&fooFlagBool, "fooFlagBool", "test")
	flag.Var(&fooFlagInt, "fooFlagInt", "test")
}

func TestMain(m *testing.M) {

@@ -28,16 +32,16 @@ func TestMain(m *testing.M) {
}

func TestArray(t *testing.T) {
	expected := map[string]struct{}{
		"foo": {},
		"bar": {},
	expected := []string{
		"foo",
		"bar",
	}
	if len(expected) != len(fooFlag) {
		t.Errorf("len array flag (%d) is not equal to %d", len(fooFlag), len(expected))
	}
	for _, i := range fooFlag {
		if _, ok := expected[i]; !ok {
			t.Errorf("unexpected item in array %v", i)
	for i, v := range fooFlag {
		if v != expected[i] {
			t.Errorf("unexpected item in array %q", v)
		}
	}
}

@@ -101,16 +105,16 @@ func TestArrayString(t *testing.T) {
}

func TestArrayDuration(t *testing.T) {
	expected := map[time.Duration]struct{}{
		time.Second * 10: {},
		time.Minute * 5: {},
	expected := []time.Duration{
		time.Second * 10,
		time.Minute * 5,
	}
	if len(expected) != len(fooFlagDuration) {
		t.Errorf("len array flag (%d) is not equal to %d", len(fooFlag), len(expected))
	}
	for _, i := range fooFlagDuration {
		if _, ok := expected[i]; !ok {
			t.Errorf("unexpected item in array %v", i)
	for i, v := range fooFlagDuration {
		if v != expected[i] {
			t.Errorf("unexpected item in array %s", v)
		}
	}
}

@@ -130,7 +134,7 @@ func TestArrayDurationSet(t *testing.T) {
}

func TestArrayDurationGetOptionalArg(t *testing.T) {
	f := func(s string, argIdx int, expectedValue time.Duration, defaultValue time.Duration) {
	f := func(s string, argIdx int, expectedValue, defaultValue time.Duration) {
		t.Helper()
		var a ArrayDuration
		_ = a.Set(s)

@@ -219,3 +223,60 @@ func TestArrayBoolString(t *testing.T) {
	f("true,false")
	f("false,true")
}

func TestArrayInt(t *testing.T) {
	expected := []int{1, 2, 3}
	if len(expected) != len(fooFlagInt) {
		t.Errorf("len array flag (%d) is not equal to %d", len(fooFlag), len(expected))
	}
	for i, n := range fooFlagInt {
		if n != expected[i] {
			t.Errorf("unexpected item in array %d", n)
		}
	}
}

func TestArrayIntSet(t *testing.T) {
	f := func(s string, expectedValues []int) {
		t.Helper()
		var a ArrayInt
		_ = a.Set(s)
		if !reflect.DeepEqual([]int(a), expectedValues) {
			t.Fatalf("unexpected values parsed;\ngot\n%q\nwant\n%q", a, expectedValues)
		}
	}
	f("", nil)
	f(`1`, []int{1})
	f(`-2,3,-64`, []int{-2, 3, -64})
}

func TestArrayIntGetOptionalArg(t *testing.T) {
	f := func(s string, argIdx int, expectedValue, defaultValue int) {
		t.Helper()
		var a ArrayInt
		_ = a.Set(s)
		v := a.GetOptionalArgOrDefault(argIdx, defaultValue)
		if v != expectedValue {
			t.Fatalf("unexpected value; got %d; want %d", v, expectedValue)
		}
	}
	f("", 0, 123, 123)
	f("", 1, -34, -34)
	f("10,1", 1, 1, 234)
	f("10", 3, 10, -34)
}

func TestArrayIntString(t *testing.T) {
	f := func(s string) {
		t.Helper()
		var a ArrayInt
		_ = a.Set(s)
		result := a.String()
		if result != s {
			t.Fatalf("unexpected string;\ngot\n%s\nwant\n%s", result, s)
		}
	}
	f("")
	f("10,1")
	f("-5,1,123")
}

@@ -242,6 +242,7 @@ var (
	scrapesOK = metrics.NewCounter(`vm_promscrape_scrapes_total{status_code="200"}`)
	scrapesGunzipped = metrics.NewCounter(`vm_promscrape_scrapes_gunziped_total`)
	scrapesGunzipFailed = metrics.NewCounter(`vm_promscrape_scrapes_gunzip_failed_total`)
	scrapeRetries = metrics.NewCounter(`vm_promscrape_scrape_retries_total`)
)

func doRequestWithPossibleRetry(hc *fasthttp.HostClient, req *fasthttp.Request, resp *fasthttp.Response, deadline time.Time) error {

@@ -259,6 +260,7 @@ func doRequestWithPossibleRetry(hc *fasthttp.HostClient, req *fasthttp.Request,
		if time.Since(deadline) >= 0 {
			return fmt.Errorf("the server closes all the connection attempts: %w", err)
		}
		scrapeRetries.Inc()
	}
}

@@ -14,6 +14,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
	"github.com/VictoriaMetrics/fasthttp"
	"github.com/VictoriaMetrics/metrics"
)

var (

@@ -192,6 +193,7 @@ func (c *Client) getAPIResponseWithParamsAndClient(client *fasthttp.HostClient,
}

func doRequestWithPossibleRetry(hc *fasthttp.HostClient, req *fasthttp.Request, resp *fasthttp.Response, deadline time.Time) error {
	discoveryRequests.Inc()
	for {
		// Use DoDeadline instead of Do even if hc.ReadTimeout is already set in order to guarantee the given deadline
		// across multiple retries.

@@ -206,5 +208,11 @@ func doRequestWithPossibleRetry(hc *fasthttp.HostClient, req *fasthttp.Request,
		if time.Since(deadline) >= 0 {
			return fmt.Errorf("the server closes all the connection attempts: %w", err)
		}
		discoveryRetries.Inc()
	}
}

var (
	discoveryRetries = metrics.NewCounter(`vm_promscrape_discovery_retries_total`)
	discoveryRequests = metrics.NewCounter(`vm_promscrape_discovery_requests_total`)
)

2
vendor/github.com/VividCortex/ewma/.gitignore
generated
vendored
Normal file

@@ -0,0 +1,2 @@
.DS_Store
.*.sw?

21
vendor/github.com/VividCortex/ewma/LICENSE
generated
vendored
Normal file

@@ -0,0 +1,21 @@
The MIT License

Copyright (c) 2013 VividCortex

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

140
vendor/github.com/VividCortex/ewma/README.md
generated
vendored
Normal file

@@ -0,0 +1,140 @@
# EWMA [![GoDoc](https://godoc.org/github.com/VividCortex/ewma?status.svg)](https://godoc.org/github.com/VividCortex/ewma) ![Build Status](https://circleci.com/gh/VividCortex/moving_average.png?circle-token=1459fa37f9ca0e50cef05d1963146d96d47ea523)

This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our
Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/).

### Exponentially Weighted Moving Average

An exponentially weighted moving average is a way to continuously compute a type of
average for a series of numbers, as the numbers arrive. After a value in the series is
added to the average, its weight in the average decreases exponentially over time. This
biases the average towards more recent data. EWMAs are useful for several reasons, chiefly
their inexpensive computational and memory cost, as well as the fact that they represent
the recent central tendency of the series of values.

The EWMA algorithm requires a decay factor, alpha. The larger the alpha, the more the average
is biased towards recent history. The alpha must be between 0 and 1, and is typically
a fairly small number, such as 0.04. We will discuss the choice of alpha later.

The algorithm works thus, in pseudocode:

1. Multiply the next number in the series by alpha.
2. Multiply the current value of the average by 1 minus alpha.
3. Add the result of steps 1 and 2, and store it as the new current value of the average.
4. Repeat for each number in the series.

There are special-case behaviors for how to initialize the current value, and these vary
between implementations. One approach is to start with the first value in the series;
another is to average the first 10 or so values in the series using an arithmetic average,
and then begin the incremental updating of the average. Each method has pros and cons.

It may help to look at it pictorially. Suppose the series has five numbers, and we choose
alpha to be 0.50 for simplicity. Here's the series, with numbers in the neighborhood of 300.

![Data Series](https://user-images.githubusercontent.com/279875/28242350-463289a2-6977-11e7-88ca-fd778ccef1f0.png)

Now let's take the moving average of those numbers. First we set the average to the value
of the first number.

![EWMA Step 1](https://user-images.githubusercontent.com/279875/28242353-464c96bc-6977-11e7-9981-dc4e0789c7ba.png)

Next we multiply the next number by alpha, multiply the current value by 1-alpha, and add
them to generate a new value.

![EWMA Step 2](https://user-images.githubusercontent.com/279875/28242351-464abefa-6977-11e7-95d0-43900f29bef2.png)

This continues until we are done.

![EWMA Step N](https://user-images.githubusercontent.com/279875/28242352-464c58f0-6977-11e7-8cd0-e01e4efaac7f.png)

Notice how each of the values in the series decays by half each time a new value
is added, and the top of the bars in the lower portion of the image represents the
size of the moving average. It is a smoothed, or low-pass, average of the original
series.

For further reading, see [Exponentially weighted moving average](http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) on wikipedia.

### Choosing Alpha

Consider a fixed-size sliding-window moving average (not an exponentially weighted moving average)
that averages over the previous N samples. What is the average age of each sample? It is N/2.

Now suppose that you wish to construct a EWMA whose samples have the same average age. The formula
to compute the alpha required for this is: alpha = 2/(N+1). Proof is in the book
"Production and Operations Analysis" by Steven Nahmias.

So, for example, if you have a time-series with samples once per second, and you want to get the
moving average over the previous minute, you should use an alpha of .032786885. This, by the way,
is the constant alpha used for this repository's SimpleEWMA.

### Implementations

This repository contains two implementations of the EWMA algorithm, with different properties.

The implementations all conform to the MovingAverage interface, and the constructor returns
that type.

Current implementations assume an implicit time interval of 1.0 between every sample added.
That is, the passage of time is treated as though it's the same as the arrival of samples.
If you need time-based decay when samples are not arriving precisely at set intervals, then
this package will not support your needs at present.

#### SimpleEWMA

A SimpleEWMA is designed for low CPU and memory consumption. It **will** have different behavior than the VariableEWMA
for multiple reasons. It has no warm-up period and it uses a constant
decay. These properties let it use less memory. It will also behave
differently when it's equal to zero, which is assumed to mean
uninitialized, so if a value is likely to actually become zero over time,
then any non-zero value will cause a sharp jump instead of a small change.

#### VariableEWMA

Unlike SimpleEWMA, this supports a custom age which must be stored, and thus uses more memory.
It also has a "warmup" time when you start adding values to it. It will report a value of 0.0
until you have added the required number of samples to it. It uses some memory to store the
number of samples added to it. As a result it uses a little over twice the memory of SimpleEWMA.

## Usage

### API Documentation

View the GoDoc generated documentation [here](http://godoc.org/github.com/VividCortex/ewma).

```go
package main
import "github.com/VividCortex/ewma"

func main() {
  samples := [100]float64{
    4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149,
  }

  e := ewma.NewMovingAverage()  //=> Returns a SimpleEWMA if called without params
  a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1)

  for _, f := range samples {
    e.Add(f)
    a.Add(f)
  }

  e.Value() //=> 13.577404704631077
  a.Value() //=> 1.5806140565521463e-12
}
```

## Contributing

We only accept pull requests for minor fixes or improvements. This includes:

* Small bug fixes
* Typos
* Documentation or comments

Please open issues to discuss new features. Pull requests for new features will be rejected,
so we recommend forking the repository and making changes in your fork for your use case.

## License

This repository is Copyright (c) 2013 VividCortex, Inc. All rights reserved.
It is licensed under the MIT license. Please see the LICENSE file for applicable license terms.
126
vendor/github.com/VividCortex/ewma/ewma.go
generated
vendored
Normal file
126
vendor/github.com/VividCortex/ewma/ewma.go
generated
vendored
Normal file
|
@ -0,0 +1,126 @@
|
|||
// Package ewma implements exponentially weighted moving averages.
package ewma

// Copyright (c) 2013 VividCortex, Inc. All rights reserved.
// Please see the LICENSE file for applicable license terms.

const (
    // By default, we average over a one-minute period, which means the average
    // age of the metrics in the period is 30 seconds.
    AVG_METRIC_AGE float64 = 30.0

    // The formula for computing the decay factor from the average age comes
    // from "Production and Operations Analysis" by Steven Nahmias.
    DECAY float64 = 2 / (float64(AVG_METRIC_AGE) + 1)

    // For best results, the moving average should not be initialized to the
    // samples it sees immediately. The book "Production and Operations
    // Analysis" by Steven Nahmias suggests initializing the moving average to
    // the mean of the first 10 samples. Until the VariableEwma has seen this
    // many samples, it is not "ready" to be queried for the value of the
    // moving average. This adds some memory cost.
    WARMUP_SAMPLES uint8 = 10
)

// MovingAverage is the interface that computes a moving average over a time-
// series stream of numbers. The average may be over a window or exponentially
// decaying.
type MovingAverage interface {
    Add(float64)
    Value() float64
    Set(float64)
}

// NewMovingAverage constructs a MovingAverage that computes an average with the
// desired characteristics in the moving window or exponential decay. If no
// age is given, it constructs a default exponentially weighted implementation
// that consumes minimal memory. The age is related to the decay factor alpha
// by the formula given for the DECAY constant. It signifies the average age
// of the samples as time goes to infinity.
func NewMovingAverage(age ...float64) MovingAverage {
    if len(age) == 0 || age[0] == AVG_METRIC_AGE {
        return new(SimpleEWMA)
    }
    return &VariableEWMA{
        decay: 2 / (age[0] + 1),
    }
}

// A SimpleEWMA represents the exponentially weighted moving average of a
// series of numbers. It WILL have different behavior than the VariableEWMA
// for multiple reasons. It has no warm-up period and it uses a constant
// decay. These properties let it use less memory. It will also behave
// differently when it's equal to zero, which is assumed to mean
// uninitialized, so if a value is likely to actually become zero over time,
// then any non-zero value will cause a sharp jump instead of a small change.
// However, note that this takes a long time, and the value may just decay
// to a stable value that's close to zero, but which won't be mistaken
// for uninitialized. See http://play.golang.org/p/litxBDr_RC for example.
type SimpleEWMA struct {
    // The current value of the average. After adding with Add(), this is
    // updated to reflect the average of all values seen thus far.
    value float64
}

// Add adds a value to the series and updates the moving average.
func (e *SimpleEWMA) Add(value float64) {
    if e.value == 0 { // this is a proxy for "uninitialized"
        e.value = value
    } else {
        e.value = (value * DECAY) + (e.value * (1 - DECAY))
    }
}

// Value returns the current value of the moving average.
func (e *SimpleEWMA) Value() float64 {
    return e.value
}

// Set sets the EWMA's value.
func (e *SimpleEWMA) Set(value float64) {
    e.value = value
}

// VariableEWMA represents the exponentially weighted moving average of a series of
// numbers. Unlike SimpleEWMA, it supports a custom age, and thus uses more memory.
type VariableEWMA struct {
    // The multiplier factor by which the previous samples decay.
    decay float64
    // The current value of the average.
    value float64
    // The number of samples added to this instance.
    count uint8
}

// Add adds a value to the series and updates the moving average.
func (e *VariableEWMA) Add(value float64) {
    switch {
    case e.count < WARMUP_SAMPLES:
        e.count++
        e.value += value
    case e.count == WARMUP_SAMPLES:
        e.count++
        e.value = e.value / float64(WARMUP_SAMPLES)
        e.value = (value * e.decay) + (e.value * (1 - e.decay))
    default:
        e.value = (value * e.decay) + (e.value * (1 - e.decay))
    }
}

// Value returns the current value of the average, or 0.0 if the series hasn't
// warmed up yet.
func (e *VariableEWMA) Value() float64 {
    if e.count <= WARMUP_SAMPLES {
        return 0.0
    }

    return e.value
}

// Set sets the EWMA's value.
func (e *VariableEWMA) Set(value float64) {
    e.value = value
    if e.count <= WARMUP_SAMPLES {
        e.count = WARMUP_SAMPLES + 1
    }
}
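As a quick hand-check of the update rule `value = sample*DECAY + value*(1-DECAY)`: with the default one-minute period, DECAY = 2/(30+1) ≈ 0.0645. Below is an illustrative sketch (not part of the vendored file) that verifies the arithmetic against SimpleEWMA:

```go
package main

import (
    "fmt"

    "github.com/VividCortex/ewma"
)

func main() {
    // DECAY = 2 / (30 + 1) ≈ 0.064516 for the default one-minute period.
    decay := 2.0 / (30.0 + 1.0)

    var e ewma.SimpleEWMA
    e.Add(1) // first Add seeds the value, since 0 means "uninitialized"
    e.Add(2) // applies 2*DECAY + 1*(1-DECAY)

    fmt.Println(e.Value())               // ≈ 1.064516
    fmt.Println(2*decay + 1*(1-decay))   // same number, computed by hand
}
```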
32
vendor/github.com/aws/aws-sdk-go/aws/config.go
generated
vendored

@ -438,13 +438,6 @@ func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
    return c
}

// MergeIn merges the passed in configs into the existing config object.
func (c *Config) MergeIn(cfgs ...*Config) {
    for _, other := range cfgs {
        mergeInConfig(c, other)
    }
}

// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag
// when resolving the endpoint for a service
func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config {

@ -459,6 +452,27 @@ func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEn
    return c
}

// WithLowerCaseHeaderMaps sets a config LowerCaseHeaderMaps value
// returning a Config pointer for chaining.
func (c *Config) WithLowerCaseHeaderMaps(t bool) *Config {
    c.LowerCaseHeaderMaps = &t
    return c
}

// WithDisableRestProtocolURICleaning sets a config DisableRestProtocolURICleaning value
// returning a Config pointer for chaining.
func (c *Config) WithDisableRestProtocolURICleaning(t bool) *Config {
    c.DisableRestProtocolURICleaning = &t
    return c
}

// MergeIn merges the passed in configs into the existing config object.
func (c *Config) MergeIn(cfgs ...*Config) {
    for _, other := range cfgs {
        mergeInConfig(c, other)
    }
}

func mergeInConfig(dst *Config, other *Config) {
    if other == nil {
        return

@ -571,6 +585,10 @@ func mergeInConfig(dst *Config, other *Config) {
    if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint {
        dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint
    }

    if other.LowerCaseHeaderMaps != nil {
        dst.LowerCaseHeaderMaps = other.LowerCaseHeaderMaps
    }
}

// Copy will return a shallow copy of the Config object. If any additional
60
vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go
generated
vendored
Normal file

@ -0,0 +1,60 @@
// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token.
//
// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider
// expects that you have already performed the SSO login flow using AWS CLI using the "aws sso login" command, or by
// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in
// ~/.aws/sso/cache. If a cached token is not found, it is expired, or the file is malformed, an error will be returned.
//
// Loading AWS SSO credentials with the AWS shared configuration file
//
// You can configure AWS SSO credentials from the AWS shared configuration file by
// specifying the required keys in the profile:
//
//   sso_account_id
//   sso_region
//   sso_role_name
//   sso_start_url
//
// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that define the target
// account, role, sign-on portal, and the region where the user portal is located. Note: all SSO arguments must be
// provided, or an error will be returned.
//
//   [profile devsso]
//   sso_start_url = https://my-sso-portal.awsapps.com/start
//   sso_role_name = SSOReadOnlyRole
//   sso_region = us-east-1
//   sso_account_id = 123456789012
//
// Using the config module, you can load the AWS SDK shared configuration, and specify that this profile be used to
// retrieve credentials. For example:
//
//   sess, err := session.NewSessionWithOptions(session.Options{
//       SharedConfigState: session.SharedConfigEnable,
//       Profile:           "devsso",
//   })
//   if err != nil {
//       return err
//   }
//
// Programmatically loading AWS SSO credentials directly
//
// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information
// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache.
//
//   svc := sso.New(sess, &aws.Config{
//       Region: aws.String("us-west-2"), // Client Region must correspond to the AWS SSO user portal region
//   })
//
//   provider := ssocreds.NewCredentialsWithClient(svc, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start")
//
//   credentials, err := provider.Get()
//   if err != nil {
//       return err
//   }
//
// Additional Resources
//
// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
//
// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
package ssocreds
9
vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go
generated
vendored
Normal file

@ -0,0 +1,9 @@
// +build !windows

package ssocreds

import "os"

func getHomeDirectory() string {
    return os.Getenv("HOME")
}
7
vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go
generated
vendored
Normal file

@ -0,0 +1,7 @@
package ssocreds

import "os"

func getHomeDirectory() string {
    return os.Getenv("USERPROFILE")
}
180
vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go
generated
vendored
Normal file

@ -0,0 +1,180 @@
package ssocreds

import (
    "crypto/sha1"
    "encoding/hex"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "path/filepath"
    "strings"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/client"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/service/sso"
    "github.com/aws/aws-sdk-go/service/sso/ssoiface"
)

// ErrCodeSSOProviderInvalidToken is the code type that is returned if loaded token has expired or is otherwise invalid.
// To refresh the SSO session run aws sso login with the corresponding profile.
const ErrCodeSSOProviderInvalidToken = "SSOProviderInvalidToken"

const invalidTokenMessage = "the SSO session has expired or is invalid"

func init() {
    nowTime = time.Now
    defaultCacheLocation = defaultCacheLocationImpl
}

var nowTime func() time.Time

// ProviderName is the name of the provider used to specify the source of credentials.
const ProviderName = "SSOProvider"

var defaultCacheLocation func() string

func defaultCacheLocationImpl() string {
    return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache")
}

// Provider is an AWS credential provider that retrieves temporary AWS credentials by exchanging an SSO login token.
type Provider struct {
    credentials.Expiry

    // The Client which is configured for the AWS Region where the AWS SSO user portal is located.
    Client ssoiface.SSOAPI

    // The AWS account that is assigned to the user.
    AccountID string

    // The role name that is assigned to the user.
    RoleName string

    // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal.
    StartURL string
}

// NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured
// for the AWS Region where the AWS SSO user portal is located.
func NewCredentials(configProvider client.ConfigProvider, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials {
    return NewCredentialsWithClient(sso.New(configProvider), accountID, roleName, startURL, optFns...)
}

// NewCredentialsWithClient returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured
// for the AWS Region where the AWS SSO user portal is located.
func NewCredentialsWithClient(client ssoiface.SSOAPI, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials {
    p := &Provider{
        Client:    client,
        AccountID: accountID,
        RoleName:  roleName,
        StartURL:  startURL,
    }

    for _, fn := range optFns {
        fn(p)
    }

    return credentials.NewCredentials(p)
}

// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
// by exchanging the accessToken present in ~/.aws/sso/cache.
func (p *Provider) Retrieve() (credentials.Value, error) {
    return p.RetrieveWithContext(aws.BackgroundContext())
}

// RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
// by exchanging the accessToken present in ~/.aws/sso/cache.
func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
    tokenFile, err := loadTokenFile(p.StartURL)
    if err != nil {
        return credentials.Value{}, err
    }

    output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{
        AccessToken: &tokenFile.AccessToken,
        AccountId:   &p.AccountID,
        RoleName:    &p.RoleName,
    })
    if err != nil {
        return credentials.Value{}, err
    }

    expireTime := time.Unix(0, aws.Int64Value(output.RoleCredentials.Expiration)*int64(time.Millisecond)).UTC()
    p.SetExpiration(expireTime, 0)

    return credentials.Value{
        AccessKeyID:     aws.StringValue(output.RoleCredentials.AccessKeyId),
        SecretAccessKey: aws.StringValue(output.RoleCredentials.SecretAccessKey),
        SessionToken:    aws.StringValue(output.RoleCredentials.SessionToken),
        ProviderName:    ProviderName,
    }, nil
}

func getCacheFileName(url string) (string, error) {
    hash := sha1.New()
    _, err := hash.Write([]byte(url))
    if err != nil {
        return "", err
    }
    return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil
}

type rfc3339 time.Time

func (r *rfc3339) UnmarshalJSON(bytes []byte) error {
    var value string

    if err := json.Unmarshal(bytes, &value); err != nil {
        return err
    }

    parse, err := time.Parse(time.RFC3339, value)
    if err != nil {
        return fmt.Errorf("expected RFC3339 timestamp: %v", err)
    }

    *r = rfc3339(parse)

    return nil
}

type token struct {
    AccessToken string  `json:"accessToken"`
    ExpiresAt   rfc3339 `json:"expiresAt"`
    Region      string  `json:"region,omitempty"`
    StartURL    string  `json:"startUrl,omitempty"`
}

func (t token) Expired() bool {
    return nowTime().Round(0).After(time.Time(t.ExpiresAt))
}

func loadTokenFile(startURL string) (t token, err error) {
    key, err := getCacheFileName(startURL)
    if err != nil {
        return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
    }

    fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key))
    if err != nil {
        return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
    }

    if err := json.Unmarshal(fileBytes, &t); err != nil {
        return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
    }

    if len(t.AccessToken) == 0 {
        return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil)
    }

    if t.Expired() {
        return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil)
    }

    return t, nil
}
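Tying the provider together end to end, the sketch below wires it into a session-backed client, mirroring the example in the package doc comment; the region, account ID, role name, and start URL are placeholders:

```go
package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials/ssocreds"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    // The session must target the region of the AWS SSO user portal.
    sess := session.Must(session.NewSession(&aws.Config{
        Region: aws.String("us-east-1"), // placeholder portal region
    }))

    // Account ID, role name, and start URL are placeholders; the provider
    // exchanges the cached token from ~/.aws/sso/cache for role credentials.
    creds := ssocreds.NewCredentials(sess,
        "123456789012", "SSOReadOnlyRole",
        "https://my-sso-portal.awsapps.com/start")

    v, err := creds.Get()
    if err != nil {
        // e.g. SSOProviderInvalidToken if "aws sso login" was never run
        log.Fatal(err)
    }
    fmt.Println("obtained credentials from provider:", v.ProviderName)
}
```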
12
vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
generated
vendored

@ -244,9 +244,11 @@ type AssumeRoleProvider struct {
    MaxJitterFrac float64
}

// NewCredentials returns a pointer to a new Credentials object wrapping the
// NewCredentials returns a pointer to a new Credentials value wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
// role will be named after a nanosecond timestamp of this operation. The
// Credentials value will attempt to refresh the credentials using the provider
// when Credentials.Get is called, if the cached credentials are expiring.
//
// Takes a Config provider to create the STS client. The ConfigProvider is
// satisfied by the session.Session type.

@ -268,9 +270,11 @@ func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*As
    return credentials.NewCredentials(p)
}

// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
// NewCredentialsWithClient returns a pointer to a new Credentials value wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
// role will be named after a nanosecond timestamp of this operation. The
// Credentials value will attempt to refresh the credentials using the provider
// when Credentials.Get is called, if the cached credentials are expiring.
//
// Takes an AssumeRoler which can be satisfied by the STS client.
//
79
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored

@ -380,9 +380,33 @@ var awsPartition = partition{
    "me-south-1": endpoint{},
    "sa-east-1":  endpoint{},
    "us-east-1":  endpoint{},
    "us-east-1-fips": endpoint{
        Hostname: "api.detective-fips.us-east-1.amazonaws.com",
        CredentialScope: credentialScope{
            Region: "us-east-1",
        },
    },
    "us-east-2": endpoint{},
    "us-east-2-fips": endpoint{
        Hostname: "api.detective-fips.us-east-2.amazonaws.com",
        CredentialScope: credentialScope{
            Region: "us-east-2",
        },
    },
    "us-west-1": endpoint{},
    "us-west-1-fips": endpoint{
        Hostname: "api.detective-fips.us-west-1.amazonaws.com",
        CredentialScope: credentialScope{
            Region: "us-west-1",
        },
    },
    "us-west-2": endpoint{},
    "us-west-2-fips": endpoint{
        Hostname: "api.detective-fips.us-west-2.amazonaws.com",
        CredentialScope: credentialScope{
            Region: "us-west-2",
        },
    },
},
},
"api.ecr": service{

@ -746,6 +770,7 @@ var awsPartition = partition{
"appmesh": service{

    Endpoints: endpoints{
        "af-south-1":     endpoint{},
        "ap-east-1":      endpoint{},
        "ap-northeast-1": endpoint{},
        "ap-northeast-2": endpoint{},

@ -1413,6 +1438,7 @@ var awsPartition = partition{
    "ca-central-1": endpoint{},
    "eu-central-1": endpoint{},
    "eu-north-1":   endpoint{},
    "eu-south-1":   endpoint{},
    "eu-west-1":    endpoint{},
    "eu-west-2":    endpoint{},
    "eu-west-3":    endpoint{},

@ -3053,6 +3079,7 @@ var awsPartition = partition{
"fsx": service{

    Endpoints: endpoints{
        "af-south-1":     endpoint{},
        "ap-east-1":      endpoint{},
        "ap-northeast-1": endpoint{},
        "ap-northeast-2": endpoint{},

@ -3062,6 +3089,7 @@ var awsPartition = partition{
    "ca-central-1": endpoint{},
    "eu-central-1": endpoint{},
    "eu-north-1":   endpoint{},
    "eu-south-1":   endpoint{},
    "eu-west-1":    endpoint{},
    "eu-west-2":    endpoint{},
    "eu-west-3":    endpoint{},

@ -3095,6 +3123,7 @@ var awsPartition = partition{
            Region: "us-west-2",
        },
    },
    "me-south-1": endpoint{},
    "sa-east-1":  endpoint{},
    "us-east-1":  endpoint{},
    "us-east-2":  endpoint{},

@ -3759,6 +3788,7 @@ var awsPartition = partition{
"lakeformation": service{

    Endpoints: endpoints{
        "ap-east-1":      endpoint{},
        "ap-northeast-1": endpoint{},
        "ap-northeast-2": endpoint{},
        "ap-south-1":     endpoint{},

@ -4274,7 +4304,19 @@ var awsPartition = partition{
    "eu-west-1": endpoint{},
    "eu-west-2": endpoint{},
    "us-east-1": endpoint{},
    "us-east-1-fips": endpoint{
        Hostname: "models-fips.lex.us-east-1.amazonaws.com",
        CredentialScope: credentialScope{
            Region: "us-east-1",
        },
    },
    "us-west-2": endpoint{},
    "us-west-2-fips": endpoint{
        Hostname: "models-fips.lex.us-west-2.amazonaws.com",
        CredentialScope: credentialScope{
            Region: "us-west-2",
        },
    },
},
},
"monitoring": service{

@ -5188,7 +5230,19 @@ var awsPartition = partition{
    "eu-west-1": endpoint{},
    "eu-west-2": endpoint{},
    "us-east-1": endpoint{},
    "us-east-1-fips": endpoint{
        Hostname: "runtime-fips.lex.us-east-1.amazonaws.com",
        CredentialScope: credentialScope{
            Region: "us-east-1",
        },
    },
    "us-west-2": endpoint{},
    "us-west-2-fips": endpoint{
        Hostname: "runtime-fips.lex.us-west-2.amazonaws.com",
        CredentialScope: credentialScope{
            Region: "us-west-2",
        },
    },
},
},
"runtime.sagemaker": service{

@ -8674,7 +8728,12 @@ var awsusgovPartition = partition{
        Region: "us-gov-east-1",
    },
},
"us-gov-east-1": endpoint{},
"us-gov-east-1": endpoint{
    Hostname: "greengrass.us-gov-east-1.amazonaws.com",
    CredentialScope: credentialScope{
        Region: "us-gov-east-1",
    },
},
"us-gov-west-1": endpoint{
    Hostname: "greengrass.us-gov-west-1.amazonaws.com",
    CredentialScope: credentialScope{

@ -9833,6 +9892,18 @@ var awsisoPartition = partition{
        "us-iso-east-1": endpoint{},
    },
},
"medialive": service{

    Endpoints: endpoints{
        "us-iso-east-1": endpoint{},
    },
},
"mediapackage": service{

    Endpoints: endpoints{
        "us-iso-east-1": endpoint{},
    },
},
"monitoring": service{

    Endpoints: endpoints{

@ -9909,6 +9980,12 @@ var awsisoPartition = partition{
        },
    },
},
"ssm": service{

    Endpoints: endpoints{
        "us-iso-east-1": endpoint{},
    },
},
"states": service{

    Endpoints: endpoints{
19
vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
generated
vendored

@ -9,6 +9,7 @@ import (
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/credentials/processcreds"
    "github.com/aws/aws-sdk-go/aws/credentials/ssocreds"
    "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
    "github.com/aws/aws-sdk-go/aws/defaults"
    "github.com/aws/aws-sdk-go/aws/request"

@ -100,6 +101,9 @@ func resolveCredsFromProfile(cfg *aws.Config,
        sharedCfg.Creds,
    )

case sharedCfg.hasSSOConfiguration():
    creds = resolveSSOCredentials(cfg, sharedCfg, handlers)

case len(sharedCfg.CredentialProcess) != 0:
    // Get credentials from CredentialProcess
    creds = processcreds.NewCredentials(sharedCfg.CredentialProcess)

@ -151,6 +155,21 @@ func resolveCredsFromProfile(cfg *aws.Config,
    return creds, nil
}

func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers request.Handlers) *credentials.Credentials {
    cfgCopy := cfg.Copy()
    cfgCopy.Region = &sharedCfg.SSORegion

    return ssocreds.NewCredentials(
        &Session{
            Config:   cfgCopy,
            Handlers: handlers.Copy(),
        },
        sharedCfg.SSOAccountID,
        sharedCfg.SSORoleName,
        sharedCfg.SSOStartURL,
    )
}

// valid credential source values
const (
    credSourceEc2Metadata = "Ec2InstanceMetadata"
2
vendor/github.com/aws/aws-sdk-go/aws/session/session.go
generated
vendored

@ -36,7 +36,7 @@ const (

// ErrSharedConfigSourceCollision will be returned if a section contains both
// source_profile and credential_source
var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil)
var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil)

// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment
// variables are empty and Environment was set as the credential source
80
vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
generated
vendored

@ -2,6 +2,7 @@ package session

import (
    "fmt"
    "strings"
    "time"

    "github.com/aws/aws-sdk-go/aws/awserr"

@ -25,6 +26,12 @@ const (
    roleSessionNameKey     = `role_session_name` // optional
    roleDurationSecondsKey = "duration_seconds"  // optional

    // AWS Single Sign-On (AWS SSO) group
    ssoAccountIDKey = "sso_account_id"
    ssoRegionKey    = "sso_region"
    ssoRoleNameKey  = "sso_role_name"
    ssoStartURL     = "sso_start_url"

    // CSM options
    csmEnabledKey = `csm_enabled`
    csmHostKey    = `csm_host`

@ -78,6 +85,11 @@ type sharedConfig struct {
    CredentialProcess    string
    WebIdentityTokenFile string

    SSOAccountID string
    SSORegion    string
    SSORoleName  string
    SSOStartURL  string

    RoleARN         string
    RoleSessionName string
    ExternalID      string

@ -217,9 +229,9 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s
    cfg.clearAssumeRoleOptions()
} else {
    // First time a profile has been seen, It must either be a assume role
    // or credentials. Assert if the credential type requires a role ARN,
    // the ARN is also set.
    if err := cfg.validateCredentialsRequireARN(profile); err != nil {
    // credentials, or SSO. Assert if the credential type requires a role ARN,
    // the ARN is also set, or validate that the SSO configuration is complete.
    if err := cfg.validateCredentialsConfig(profile); err != nil {
        return err
    }
}

@ -312,6 +324,12 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
        }
        cfg.S3UsEast1RegionalEndpoint = sre
    }

    // AWS Single Sign-On (AWS SSO)
    updateString(&cfg.SSOAccountID, section, ssoAccountIDKey)
    updateString(&cfg.SSORegion, section, ssoRegionKey)
    updateString(&cfg.SSORoleName, section, ssoRoleNameKey)
    updateString(&cfg.SSOStartURL, section, ssoStartURL)
}

updateString(&cfg.CredentialProcess, section, credentialProcessKey)

@ -342,6 +360,18 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
    return nil
}

func (cfg *sharedConfig) validateCredentialsConfig(profile string) error {
    if err := cfg.validateCredentialsRequireARN(profile); err != nil {
        return err
    }

    if err := cfg.validateSSOConfiguration(profile); err != nil {
        return err
    }

    return nil
}

func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error {
    var credSource string

@ -371,6 +401,7 @@ func (cfg *sharedConfig) validateCredentialType() error {
    len(cfg.CredentialSource) != 0,
    len(cfg.CredentialProcess) != 0,
    len(cfg.WebIdentityTokenFile) != 0,
    cfg.hasSSOConfiguration(),
) {
    return ErrSharedConfigSourceCollision
}

@ -378,12 +409,43 @@ func (cfg *sharedConfig) validateCredentialType() error {
    return nil
}

func (cfg *sharedConfig) validateSSOConfiguration(profile string) error {
    if !cfg.hasSSOConfiguration() {
        return nil
    }

    var missing []string
    if len(cfg.SSOAccountID) == 0 {
        missing = append(missing, ssoAccountIDKey)
    }

    if len(cfg.SSORegion) == 0 {
        missing = append(missing, ssoRegionKey)
    }

    if len(cfg.SSORoleName) == 0 {
        missing = append(missing, ssoRoleNameKey)
    }

    if len(cfg.SSOStartURL) == 0 {
        missing = append(missing, ssoStartURL)
    }

    if len(missing) > 0 {
        return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
            profile, strings.Join(missing, ", "))
    }

    return nil
}

func (cfg *sharedConfig) hasCredentials() bool {
    switch {
    case len(cfg.SourceProfileName) != 0:
    case len(cfg.CredentialSource) != 0:
    case len(cfg.CredentialProcess) != 0:
    case len(cfg.WebIdentityTokenFile) != 0:
    case cfg.hasSSOConfiguration():
    case cfg.Creds.HasKeys():
    default:
        return false

@ -407,6 +469,18 @@ func (cfg *sharedConfig) clearAssumeRoleOptions() {
    cfg.SourceProfileName = ""
}

func (cfg *sharedConfig) hasSSOConfiguration() bool {
    switch {
    case len(cfg.SSOAccountID) != 0:
    case len(cfg.SSORegion) != 0:
    case len(cfg.SSORoleName) != 0:
    case len(cfg.SSOStartURL) != 0:
    default:
        return false
    }
    return true
}

func oneOrNone(bs ...bool) bool {
    var count int
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.36.25"
const SDKVersion = "1.37.1"
44
vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
generated
vendored

@ -1,9 +1,10 @@
package protocol

import (
    "strings"

    "github.com/aws/aws-sdk-go/aws/request"
    "net"
    "strconv"
    "strings"
)

// ValidateEndpointHostHandler is a request handler that will validate the

@ -22,8 +23,26 @@ var ValidateEndpointHostHandler = request.NamedHandler{
// 3986 host. Returns error if the host is not valid.
func ValidateEndpointHost(opName, host string) error {
    paramErrs := request.ErrInvalidParams{Context: opName}
    labels := strings.Split(host, ".")

    var hostname string
    var port string
    var err error

    if strings.Contains(host, ":") {
        hostname, port, err = net.SplitHostPort(host)

        if err != nil {
            paramErrs.Add(request.NewErrParamFormat("endpoint", err.Error(), host))
        }

        if !ValidPortNumber(port) {
            paramErrs.Add(request.NewErrParamFormat("endpoint port number", "[0-65535]", port))
        }
    } else {
        hostname = host
    }

    labels := strings.Split(hostname, ".")
    for i, label := range labels {
        if i == len(labels)-1 && len(label) == 0 {
            // Allow trailing dot for FQDN hosts.

@ -36,7 +55,11 @@ func ValidateEndpointHost(opName, host string) error {
        }
    }

    if len(host) > 255 {
    if len(hostname) == 0 {
        paramErrs.Add(request.NewErrParamMinLen("endpoint host", 1))
    }

    if len(hostname) > 255 {
        paramErrs.Add(request.NewErrParamMaxLen(
            "endpoint host", 255, host,
        ))

@ -66,3 +89,16 @@ func ValidHostLabel(label string) bool {

    return true
}

// ValidPortNumber returns whether the port is a valid RFC 3986 port.
func ValidPortNumber(port string) bool {
    i, err := strconv.Atoi(port)
    if err != nil {
        return false
    }

    if i < 0 || i > 65535 {
        return false
    }
    return true
}
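A brief sketch of the new host/port validation, using only the two exported functions shown in the diff above; the hostnames and ports are arbitrary examples:

```go
package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
    // Hosts with an explicit port are now split and the port range-checked.
    fmt.Println(protocol.ValidateEndpointHost("MyOp", "localhost:8443"))  // <nil>
    fmt.Println(protocol.ValidateEndpointHost("MyOp", "localhost:99999")) // invalid port error

    fmt.Println(protocol.ValidPortNumber("8443"))  // true
    fmt.Println(protocol.ValidPortNumber("99999")) // false: out of [0-65535]
}
```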
88
vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go
generated
vendored
Normal file

@ -0,0 +1,88 @@
// Package jsonrpc provides JSON RPC utilities for serialization of AWS
// requests and responses.
package jsonrpc

//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/json.json build_test.go
//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/json.json unmarshal_test.go

import (
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
    "github.com/aws/aws-sdk-go/private/protocol/rest"
)

var emptyJSON = []byte("{}")

// BuildHandler is a named request handler for building jsonrpc protocol
// requests
var BuildHandler = request.NamedHandler{
    Name: "awssdk.jsonrpc.Build",
    Fn:   Build,
}

// UnmarshalHandler is a named request handler for unmarshaling jsonrpc
// protocol requests
var UnmarshalHandler = request.NamedHandler{
    Name: "awssdk.jsonrpc.Unmarshal",
    Fn:   Unmarshal,
}

// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc
// protocol request metadata
var UnmarshalMetaHandler = request.NamedHandler{
    Name: "awssdk.jsonrpc.UnmarshalMeta",
    Fn:   UnmarshalMeta,
}

// Build builds a JSON payload for a JSON RPC request.
func Build(req *request.Request) {
    var buf []byte
    var err error
    if req.ParamsFilled() {
        buf, err = jsonutil.BuildJSON(req.Params)
        if err != nil {
            req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err)
            return
        }
    } else {
        buf = emptyJSON
    }

    if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" {
        req.SetBufferBody(buf)
    }

    if req.ClientInfo.TargetPrefix != "" {
        target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name
        req.HTTPRequest.Header.Add("X-Amz-Target", target)
    }

    // Only set the content type if one is not already specified and a
    // JSONVersion is specified.
    if ct, v := req.HTTPRequest.Header.Get("Content-Type"), req.ClientInfo.JSONVersion; len(ct) == 0 && len(v) != 0 {
        jsonVersion := req.ClientInfo.JSONVersion
        req.HTTPRequest.Header.Set("Content-Type", "application/x-amz-json-"+jsonVersion)
    }
}

// Unmarshal unmarshals a response for a JSON RPC service.
func Unmarshal(req *request.Request) {
    defer req.HTTPResponse.Body.Close()
    if req.DataFilled() {
        err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body)
        if err != nil {
            req.Error = awserr.NewRequestFailure(
                awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err),
                req.HTTPResponse.StatusCode,
                req.RequestID,
            )
        }
    }
    return
}

// UnmarshalMeta unmarshals headers from a response for a JSON RPC service.
func UnmarshalMeta(req *request.Request) {
    rest.UnmarshalMeta(req)
}
107
vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
generated
vendored
Normal file

@ -0,0 +1,107 @@
package jsonrpc

import (
    "bytes"
    "io"
    "io/ioutil"
    "net/http"
    "strings"

    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/private/protocol"
    "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
)

// UnmarshalTypedError provides unmarshaling of API response errors
// for both typed and untyped errors.
type UnmarshalTypedError struct {
    exceptions map[string]func(protocol.ResponseMetadata) error
}

// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the
// set of exception names to the error unmarshalers
func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
    return &UnmarshalTypedError{
        exceptions: exceptions,
    }
}

// UnmarshalError attempts to unmarshal the HTTP response error as a known
// error type. If unable to unmarshal the error type, the generic SDK error
// type will be used.
func (u *UnmarshalTypedError) UnmarshalError(
    resp *http.Response,
    respMeta protocol.ResponseMetadata,
) (error, error) {

    var buf bytes.Buffer
    var jsonErr jsonErrorResponse
    teeReader := io.TeeReader(resp.Body, &buf)
    err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader)
    if err != nil {
        return nil, err
    }
    body := ioutil.NopCloser(&buf)

    // Code may be separated by hash(#), with the last element being the code
    // used by the SDK.
    codeParts := strings.SplitN(jsonErr.Code, "#", 2)
    code := codeParts[len(codeParts)-1]
    msg := jsonErr.Message

    if fn, ok := u.exceptions[code]; ok {
        // If the exception code is known, use the associated constructor to
        // get a value for the exception that the JSON body can be
        // unmarshaled into.
        v := fn(respMeta)
        err := jsonutil.UnmarshalJSONCaseInsensitive(v, body)
        if err != nil {
            return nil, err
        }

        return v, nil
    }

    // fallback to unmodeled generic exceptions
    return awserr.NewRequestFailure(
        awserr.New(code, msg, nil),
        respMeta.StatusCode,
        respMeta.RequestID,
    ), nil
}

// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc
// protocol request errors
var UnmarshalErrorHandler = request.NamedHandler{
    Name: "awssdk.jsonrpc.UnmarshalError",
    Fn:   UnmarshalError,
}

// UnmarshalError unmarshals an error response for a JSON RPC service.
func UnmarshalError(req *request.Request) {
    defer req.HTTPResponse.Body.Close()

    var jsonErr jsonErrorResponse
    err := jsonutil.UnmarshalJSONError(&jsonErr, req.HTTPResponse.Body)
    if err != nil {
        req.Error = awserr.NewRequestFailure(
            awserr.New(request.ErrCodeSerialization,
                "failed to unmarshal error message", err),
            req.HTTPResponse.StatusCode,
            req.RequestID,
        )
        return
    }

    codes := strings.SplitN(jsonErr.Code, "#", 2)
    req.Error = awserr.NewRequestFailure(
        awserr.New(codes[len(codes)-1], jsonErr.Message, nil),
        req.HTTPResponse.StatusCode,
        req.RequestID,
    )
}

type jsonErrorResponse struct {
    Code    string `json:"__type"`
    Message string `json:"message"`
}
59
vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go
generated
vendored
Normal file

@ -0,0 +1,59 @@
// Package restjson provides RESTful JSON serialization of AWS
// requests and responses.
package restjson

//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-json.json build_test.go
//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go

import (
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
    "github.com/aws/aws-sdk-go/private/protocol/rest"
)

// BuildHandler is a named request handler for building restjson protocol
// requests
var BuildHandler = request.NamedHandler{
    Name: "awssdk.restjson.Build",
    Fn:   Build,
}

// UnmarshalHandler is a named request handler for unmarshaling restjson
// protocol requests
var UnmarshalHandler = request.NamedHandler{
    Name: "awssdk.restjson.Unmarshal",
    Fn:   Unmarshal,
}

// UnmarshalMetaHandler is a named request handler for unmarshaling restjson
// protocol request metadata
var UnmarshalMetaHandler = request.NamedHandler{
    Name: "awssdk.restjson.UnmarshalMeta",
    Fn:   UnmarshalMeta,
}

// Build builds a request for the REST JSON protocol.
func Build(r *request.Request) {
    rest.Build(r)

    if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
        if v := r.HTTPRequest.Header.Get("Content-Type"); len(v) == 0 {
            r.HTTPRequest.Header.Set("Content-Type", "application/json")
        }
        jsonrpc.Build(r)
    }
}

// Unmarshal unmarshals a response body for the REST JSON protocol.
func Unmarshal(r *request.Request) {
    if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
        jsonrpc.Unmarshal(r)
    } else {
        rest.Unmarshal(r)
    }
}

// UnmarshalMeta unmarshals response headers for the REST JSON protocol.
func UnmarshalMeta(r *request.Request) {
    rest.UnmarshalMeta(r)
}
134
vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
generated
vendored
Normal file

@ -0,0 +1,134 @@
package restjson

import (
    "bytes"
    "io"
    "io/ioutil"
    "net/http"
    "strings"

    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/private/protocol"
    "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
    "github.com/aws/aws-sdk-go/private/protocol/rest"
)

const (
    errorTypeHeader    = "X-Amzn-Errortype"
    errorMessageHeader = "X-Amzn-Errormessage"
)

// UnmarshalTypedError provides unmarshaling of API response errors
// for both typed and untyped errors.
type UnmarshalTypedError struct {
    exceptions map[string]func(protocol.ResponseMetadata) error
}

// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the
// set of exception names to the error unmarshalers
func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
    return &UnmarshalTypedError{
        exceptions: exceptions,
    }
}

// UnmarshalError attempts to unmarshal the HTTP response error as a known
// error type. If unable to unmarshal the error type, the generic SDK error
// type will be used.
func (u *UnmarshalTypedError) UnmarshalError(
    resp *http.Response,
    respMeta protocol.ResponseMetadata,
) (error, error) {

    code := resp.Header.Get(errorTypeHeader)
    msg := resp.Header.Get(errorMessageHeader)

    body := resp.Body
    if len(code) == 0 {
        // If unable to get the code from the HTTP headers, we have to parse
        // the JSON message to determine what kind of exception this will be.
        var buf bytes.Buffer
        var jsonErr jsonErrorResponse
        teeReader := io.TeeReader(resp.Body, &buf)
        err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader)
        if err != nil {
            return nil, err
        }

        body = ioutil.NopCloser(&buf)
        code = jsonErr.Code
        msg = jsonErr.Message
    }

    // If the code has colon separators, remove them so it can be compared
    // against the modeled exception names.
    code = strings.SplitN(code, ":", 2)[0]

    if fn, ok := u.exceptions[code]; ok {
        // If the exception code is known, use the associated constructor to
        // get a value for the exception that the JSON body can be
        // unmarshaled into.
        v := fn(respMeta)
        if err := jsonutil.UnmarshalJSONCaseInsensitive(v, body); err != nil {
            return nil, err
        }

        if err := rest.UnmarshalResponse(resp, v, true); err != nil {
            return nil, err
        }

        return v, nil
    }

    // fallback to unmodeled generic exceptions
    return awserr.NewRequestFailure(
        awserr.New(code, msg, nil),
        respMeta.StatusCode,
        respMeta.RequestID,
    ), nil
}

// UnmarshalErrorHandler is a named request handler for unmarshaling restjson
// protocol request errors
var UnmarshalErrorHandler = request.NamedHandler{
    Name: "awssdk.restjson.UnmarshalError",
    Fn:   UnmarshalError,
}

// UnmarshalError unmarshals a response error for the REST JSON protocol.
func UnmarshalError(r *request.Request) {
    defer r.HTTPResponse.Body.Close()

    var jsonErr jsonErrorResponse
    err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body)
    if err != nil {
        r.Error = awserr.NewRequestFailure(
            awserr.New(request.ErrCodeSerialization,
                "failed to unmarshal response error", err),
            r.HTTPResponse.StatusCode,
            r.RequestID,
        )
        return
    }

    code := r.HTTPResponse.Header.Get(errorTypeHeader)
    if code == "" {
        code = jsonErr.Code
    }
    msg := r.HTTPResponse.Header.Get(errorMessageHeader)
    if msg == "" {
        msg = jsonErr.Message
    }

    code = strings.SplitN(code, ":", 2)[0]
    r.Error = awserr.NewRequestFailure(
        // Use msg so the X-Amzn-Errormessage header is honored when present.
        awserr.New(code, msg, nil),
        r.HTTPResponse.StatusCode,
        r.RequestID,
    )
}

type jsonErrorResponse struct {
    Code    string `json:"code"`
    Message string `json:"message"`
}
5
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
generated
vendored

@ -109,6 +109,9 @@ type UploadOutput struct {
    // The ID for a multipart upload to S3. In the case of an error the error
    // can be cast to the MultiUploadFailure interface to extract the upload ID.
    UploadID string

    // Entity tag of the object.
    ETag *string
}

// WithUploaderRequestOptions appends to the Uploader's API request options.

@ -527,6 +530,7 @@ func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, e
    return &UploadOutput{
        Location:  url,
        VersionID: out.VersionId,
        ETag:      out.ETag,
    }, nil
}

@ -632,6 +636,7 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadO
        Location:  uploadLocation,
        VersionID: complete.VersionId,
        UploadID:  u.uploadID,
        ETag:      complete.ETag,
    }, nil
}
1210
vendor/github.com/aws/aws-sdk-go/service/sso/api.go
generated
vendored
Normal file

File diff suppressed because it is too large
44
vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
generated
vendored
Normal file

@ -0,0 +1,44 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

// Package sso provides the client and types for making API
// requests to AWS Single Sign-On.
//
// AWS Single Sign-On Portal is a web service that makes it easy for you to
// assign user access to AWS SSO resources such as the user portal. Users can
// get AWS account applications and roles assigned to them and get federated
// into the application.
//
// For general information about AWS SSO, see What is AWS Single Sign-On? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
// in the AWS SSO User Guide.
//
// This API reference guide describes the AWS SSO Portal operations that you
// can call programmatically and includes detailed information on data types
// and errors.
//
// AWS provides SDKs that consist of libraries and sample code for various programming
// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs
// provide a convenient way to create programmatic access to AWS SSO and other
// AWS services. For more information about the AWS SDKs, including how to download
// and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
//
// See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service.
//
// See sso package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/
//
// Using the Client
//
// To contact AWS Single Sign-On with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
//
// See the SDK's documentation for more information on how to use the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/
//
// See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
//
// See the AWS Single Sign-On client SSO for more
// information on creating client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/#New
package sso
44
vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
generated
vendored
Normal file

@ -0,0 +1,44 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package sso

import (
    "github.com/aws/aws-sdk-go/private/protocol"
)

const (

    // ErrCodeInvalidRequestException for service response error code
    // "InvalidRequestException".
    //
    // Indicates that a problem occurred with the input to the request. For example,
    // a required parameter might be missing or out of range.
    ErrCodeInvalidRequestException = "InvalidRequestException"

    // ErrCodeResourceNotFoundException for service response error code
    // "ResourceNotFoundException".
    //
    // The specified resource doesn't exist.
    ErrCodeResourceNotFoundException = "ResourceNotFoundException"

    // ErrCodeTooManyRequestsException for service response error code
    // "TooManyRequestsException".
    //
    // Indicates that the request is being made too frequently and is more than
    // what the server can handle.
    ErrCodeTooManyRequestsException = "TooManyRequestsException"

    // ErrCodeUnauthorizedException for service response error code
    // "UnauthorizedException".
    //
    // Indicates that the request is not authorized. This can happen due to an invalid
    // access token in the request.
    ErrCodeUnauthorizedException = "UnauthorizedException"
)

var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
    "InvalidRequestException":   newErrorInvalidRequestException,
    "ResourceNotFoundException": newErrorResourceNotFoundException,
    "TooManyRequestsException":  newErrorTooManyRequestsException,
    "UnauthorizedException":     newErrorUnauthorizedException,
}
104
vendor/github.com/aws/aws-sdk-go/service/sso/service.go
generated
vendored
Normal file

@ -0,0 +1,104 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package sso

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/client"
    "github.com/aws/aws-sdk-go/aws/client/metadata"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/aws/signer/v4"
    "github.com/aws/aws-sdk-go/private/protocol"
    "github.com/aws/aws-sdk-go/private/protocol/restjson"
)

// SSO provides the API operation methods for making requests to
// AWS Single Sign-On. See this package's package overview docs
// for details on the service.
//
// SSO methods are safe to use concurrently. It is not safe to
// modify any of the struct's properties though.
type SSO struct {
    *client.Client
}

// Used for custom client initialization logic
var initClient func(*client.Client)

// Used for custom request initialization logic
var initRequest func(*request.Request)

// Service information constants
const (
    ServiceName = "SSO"        // Name of service.
    EndpointsID = "portal.sso" // ID to lookup a service endpoint with.
    ServiceID   = "SSO"        // ServiceID is a unique identifier of a specific service.
)

// New creates a new instance of the SSO client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     mySession := session.Must(session.NewSession())
//
//     // Create a SSO client from just a session.
//     svc := sso.New(mySession)
//
//     // Create a SSO client with additional configuration
//     svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO {
    c := p.ClientConfig(EndpointsID, cfgs...)
    if c.SigningNameDerived || len(c.SigningName) == 0 {
        c.SigningName = "awsssoportal"
    }
    return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SSO {
    svc := &SSO{
        Client: client.New(
            cfg,
            metadata.ClientInfo{
                ServiceName:   ServiceName,
                ServiceID:     ServiceID,
                SigningName:   signingName,
                SigningRegion: signingRegion,
                PartitionID:   partitionID,
                Endpoint:      endpoint,
                APIVersion:    "2019-06-10",
            },
            handlers,
        ),
    }

    // Handlers
    svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
    svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
    svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
    svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
    svc.Handlers.UnmarshalError.PushBackNamed(
        protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
    )

    // Run custom client initialization if present
    if initClient != nil {
        initClient(svc.Client)
    }

    return svc
}

// newRequest creates a new request for a SSO operation and runs any
// custom request initialization.
func (c *SSO) newRequest(op *request.Operation, params, data interface{}) *request.Request {
    req := c.NewRequest(op, params, data)

    // Run custom request initialization if present
    if initRequest != nil {
        initRequest(req)
    }

    return req
}
Some files were not shown because too many files have changed in this diff.