app/vmctl: move vmctl code from github.com/VictoriaMetrics/vmctl
It is better to develop the vmctl tool in the VictoriaMetrics repository, so it can be released together with the rest of the vmutils tools such as vmalert, vmagent, vmbackup, vmrestore and vmauth.
parent 2a7b1cc668
commit d5c180e680
467 changed files with 97073 additions and 12 deletions

.github/workflows/main.yml (vendored): 3 changes

@@ -45,16 +45,19 @@ jobs:
       GOOS=freebsd go build -mod=vendor ./app/vmalert
       GOOS=freebsd go build -mod=vendor ./app/vmbackup
       GOOS=freebsd go build -mod=vendor ./app/vmrestore
+      GOOS=freebsd go build -mod=vendor ./app/vmctl
       GOOS=openbsd go build -mod=vendor ./app/victoria-metrics
       GOOS=openbsd go build -mod=vendor ./app/vmagent
       GOOS=openbsd go build -mod=vendor ./app/vmalert
       GOOS=openbsd go build -mod=vendor ./app/vmbackup
       GOOS=openbsd go build -mod=vendor ./app/vmrestore
+      GOOS=openbsd go build -mod=vendor ./app/vmctl
       GOOS=darwin go build -mod=vendor ./app/victoria-metrics
       GOOS=darwin go build -mod=vendor ./app/vmagent
       GOOS=darwin go build -mod=vendor ./app/vmalert
       GOOS=darwin go build -mod=vendor ./app/vmbackup
       GOOS=darwin go build -mod=vendor ./app/vmrestore
+      GOOS=darwin go build -mod=vendor ./app/vmctl
     - name: Publish coverage
       uses: codecov/codecov-action@v1.0.6
       with:

Makefile: 22 changes

@@ -18,7 +18,8 @@ all: \
 	vmalert-prod \
 	vmauth-prod \
 	vmbackup-prod \
-	vmrestore-prod
+	vmrestore-prod \
+	vmctl-prod

 include app/*/Makefile
 include deployment/*/Makefile
@@ -32,7 +33,8 @@ publish: \
 	publish-vmalert \
 	publish-vmauth \
 	publish-vmbackup \
-	publish-vmrestore
+	publish-vmrestore \
+	publish-vmctl

 package: \
 	package-victoria-metrics \
@@ -40,21 +42,24 @@ package: \
 	package-vmalert \
 	package-vmauth \
 	package-vmbackup \
-	package-vmrestore
+	package-vmrestore \
+	package-vmctl

 vmutils: \
 	vmagent \
 	vmalert \
 	vmauth \
 	vmbackup \
-	vmrestore
+	vmrestore \
+	vmctl

 vmutils-arm64: \
 	vmagent-arm64 \
 	vmalert-arm64 \
 	vmauth-arm64 \
 	vmbackup-arm64 \
-	vmrestore-arm64
+	vmrestore-arm64 \
+	vmctl-arm64

 release-snap:
 	snapcraft
@@ -97,7 +102,8 @@ release-vmutils-generic: \
 	vmalert-$(GOARCH)-prod \
 	vmauth-$(GOARCH)-prod \
 	vmbackup-$(GOARCH)-prod \
-	vmrestore-$(GOARCH)-prod
+	vmrestore-$(GOARCH)-prod \
+	vmctl-$(GOARCH)-prod
 	cd bin && \
 	tar --transform="flags=r;s|-$(GOARCH)||" -czf vmutils-$(GOARCH)-$(PKG_TAG).tar.gz \
 		vmagent-$(GOARCH)-prod \
@@ -105,12 +111,14 @@ release-vmutils-generic: \
 		vmauth-$(GOARCH)-prod \
 		vmbackup-$(GOARCH)-prod \
 		vmrestore-$(GOARCH)-prod \
+		vmctl-$(GOARCH)-prod \
 	&& sha256sum vmutils-$(GOARCH)-$(PKG_TAG).tar.gz \
 		vmagent-$(GOARCH)-prod \
 		vmalert-$(GOARCH)-prod \
 		vmauth-$(GOARCH)-prod \
 		vmbackup-$(GOARCH)-prod \
 		vmrestore-$(GOARCH)-prod \
+		vmctl-$(GOARCH)-prod \
 		| sed s/-$(GOARCH)// > vmutils-$(GOARCH)-$(PKG_TAG)_checksums.txt

 pprof-cpu:
@@ -141,6 +149,7 @@ errcheck: install-errcheck
 	errcheck -exclude=errcheck_excludes.txt ./app/vmauth/...
 	errcheck -exclude=errcheck_excludes.txt ./app/vmbackup/...
 	errcheck -exclude=errcheck_excludes.txt ./app/vmrestore/...
+	errcheck -exclude=errcheck_excludes.txt ./app/vmctl/...

 install-errcheck:
 	which errcheck || go install github.com/kisielk/errcheck
@@ -204,4 +213,5 @@ docs-sync:
 	cp app/vmauth/README.md docs/vmauth.md
 	cp app/vmbackup/README.md docs/vmbackup.md
 	cp app/vmrestore/README.md docs/vmrestore.md
+	cp app/vmctl/README.md docs/vmctl.md
 	cp README.md docs/Single-server-VictoriaMetrics.md

README.md: 13 changes

@@ -154,6 +154,7 @@ Alphabetically sorted links to case studies:
 * [Tuning](#tuning)
 * [Monitoring](#monitoring)
 * [Troubleshooting](#troubleshooting)
+* [Data migration](#data-migration)
 * [Backfilling](#backfilling)
 * [Data updates](#data-updates)
 * [Replication](#replication)
@@ -1353,6 +1354,17 @@ See the example of alerting rules for VM components [here](https://github.com/Vi
 * VictoriaMetrics ignores `NaN` values during data ingestion.

+## Data migration
+
+Use [vmctl](https://victoriametrics.github.io/vmctl.html) for data migration. It supports the following data migration types:
+
+* From Prometheus to VictoriaMetrics
+* From InfluxDB to VictoriaMetrics
+* From VictoriaMetrics to VictoriaMetrics
+
+See [vmctl docs](https://victoriametrics.github.io/vmctl.html) for more details.
+
 ## Backfilling

 VictoriaMetrics accepts historical data in arbitrary order of time via [any supported ingestion method](#how-to-import-time-series-data).
@@ -1420,7 +1432,6 @@ The collected profiles may be analyzed with [go tool pprof](https://github.com/g

 * [Helm charts for single-node and cluster versions of VictoriaMetrics](https://github.com/VictoriaMetrics/helm-charts).
 * [Kubernetes operator for VictoriaMetrics](https://github.com/VictoriaMetrics/operator).
-* [vmctl tool for data migration to VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl).
 * [netdata](https://github.com/netdata/netdata) can push data into VictoriaMetrics via `Prometheus remote_write API`.
   See [these docs](https://github.com/netdata/netdata#integrations).
 * [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi) can use VictoriaMetrics as time series backend.

app/vmctl/Makefile (new file): 73 lines

@@ -0,0 +1,73 @@
# All these commands must run from repository root.

vmctl:
	APP_NAME=vmctl $(MAKE) app-local

vmctl-race:
	APP_NAME=vmctl RACE=-race $(MAKE) app-local

vmctl-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker

vmctl-pure-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-pure

vmctl-amd64-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-amd64

vmctl-arm-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-arm

vmctl-arm64-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-arm64

vmctl-ppc64le-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-ppc64le

vmctl-386-prod:
	APP_NAME=vmctl $(MAKE) app-via-docker-386

package-vmctl:
	APP_NAME=vmctl $(MAKE) package-via-docker

package-vmctl-pure:
	APP_NAME=vmctl $(MAKE) package-via-docker-pure

package-vmctl-amd64:
	APP_NAME=vmctl $(MAKE) package-via-docker-amd64

package-vmctl-arm:
	APP_NAME=vmctl $(MAKE) package-via-docker-arm

package-vmctl-arm64:
	APP_NAME=vmctl $(MAKE) package-via-docker-arm64

package-vmctl-ppc64le:
	APP_NAME=vmctl $(MAKE) package-via-docker-ppc64le

package-vmctl-386:
	APP_NAME=vmctl $(MAKE) package-via-docker-386

publish-vmctl:
	APP_NAME=vmctl $(MAKE) publish-via-docker

vmctl-amd64:
	CGO_ENABLED=1 GOARCH=amd64 $(MAKE) vmctl-local-with-goarch

vmctl-arm:
	CGO_ENABLED=0 GOARCH=arm $(MAKE) vmctl-local-with-goarch

vmctl-arm64:
	CGO_ENABLED=0 GOARCH=arm64 $(MAKE) vmctl-local-with-goarch

vmctl-ppc64le:
	CGO_ENABLED=0 GOARCH=ppc64le $(MAKE) vmctl-local-with-goarch

vmctl-386:
	CGO_ENABLED=0 GOARCH=386 $(MAKE) vmctl-local-with-goarch

vmctl-local-with-goarch:
	APP_NAME=vmctl $(MAKE) app-local-with-goarch

vmctl-pure:
	APP_NAME=vmctl $(MAKE) app-local-pure

app/vmctl/README.md (new file): 427 lines

@@ -0,0 +1,427 @@
# vmctl - VictoriaMetrics command-line tool

Features:
- [x] Prometheus: migrate data from Prometheus to VictoriaMetrics using snapshot API
- [x] Thanos: migrate data from Thanos to VictoriaMetrics
- [ ] ~~Prometheus: migrate data from Prometheus to VictoriaMetrics by query~~ (discarded)
- [x] InfluxDB: migrate data from InfluxDB to VictoriaMetrics
- [ ] Storage Management: data re-balancing between nodes

# Table of contents

* [Articles](#articles)
* [How to build](#how-to-build)
* [Migrating data from InfluxDB 1.x](#migrating-data-from-influxdb-1x)
  * [Data mapping](#data-mapping)
  * [Configuration](#configuration)
  * [Filtering](#filtering)
* [Migrating data from InfluxDB 2.x](#migrating-data-from-influxdb-2x)
* [Migrating data from Prometheus](#migrating-data-from-prometheus)
  * [Data mapping](#data-mapping-1)
  * [Configuration](#configuration-1)
  * [Filtering](#filtering-1)
* [Migrating data from Thanos](#migrating-data-from-thanos)
  * [Current data](#current-data)
  * [Historical data](#historical-data)
* [Migrating data from VictoriaMetrics](#migrating-data-from-victoriametrics)
  * [Native protocol](#native-protocol)
* [Tuning](#tuning)
  * [Influx mode](#influx-mode)
  * [Prometheus mode](#prometheus-mode)
  * [VictoriaMetrics importer](#victoriametrics-importer)
  * [Importer stats](#importer-stats)
  * [Significant figures](#significant-figures)
  * [Adding extra labels](#adding-extra-labels)

## Articles

* [How to migrate data from Prometheus](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-d44a6728f043)
* [How to migrate data from Prometheus. Filtering and modifying time series](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-filtering-and-modifying-time-series-6d40cea4bf21)

## How to build

1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.12.
2. Run `make build` from the root folder of the repository.
   It builds the `vmctl` binary and puts it into the `bin` folder.

## Migrating data from InfluxDB (1.x)

`vmctl` supports the `influx` mode for migrating data from InfluxDB to the VictoriaMetrics time-series database.

See `./vmctl influx --help` for details and the full list of flags.

To use the migration tool, please specify the InfluxDB address `--influx-addr`, the database `--influx-database` and the VictoriaMetrics address `--vm-addr`.
The `--vm-addr` flag for a single-node VM is usually equal to `--httpListenAddr`, and for the cluster version
it is equal to the `--httpListenAddr` flag of the VMInsert component. Please note that vmctl performs an initial readiness check for the given address
by probing the `/health` endpoint. For the cluster version it is additionally required to specify the `--vm-account-id` flag.
See more details for the cluster version [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).

As soon as the required flags are provided and all endpoints are accessible, `vmctl` starts the InfluxDB scheme exploration.
Basically, it just fetches all fields and timeseries from the provided database and builds up a registry of all available timeseries.
Then `vmctl` sends fetch requests for each timeseries to InfluxDB one by one and passes the results to the VM importer.
The VM importer then accumulates the received samples in batches and sends import requests to VM.
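
A minimal sketch of this pipeline's fan-out shape, under stated assumptions: the `series` type and the worker body here are hypothetical stand-ins for `influx.Series` and the fetch/import logic from `app/vmctl/influx.go` later in this commit.
```
package main

import "sync"

// series is a hypothetical stand-in for the registry entries
// built during scheme exploration (the real type is influx.Series).
type series struct{ measurement, field string }

func main() {
	registry := []series{{"cpu", "usage"}, {"mem", "free"}} // built by exploration
	seriesCh := make(chan series)
	var wg sync.WaitGroup
	const concurrency = 2 // --influx-concurrency
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for s := range seriesCh {
				_ = s // fetch datapoints for s and hand them to the VM importer
			}
		}()
	}
	for _, s := range registry {
		seriesCh <- s // feed each explored series to a free worker
	}
	close(seriesCh)
	wg.Wait()
}
```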

The importing process example for a local installation of InfluxDB (`http://localhost:8086`)
and single-node VictoriaMetrics (`http://localhost:8428`):
```
./vmctl influx --influx-database benchmark
InfluxDB import mode
2020/01/18 20:47:11 Exploring scheme for database "benchmark"
2020/01/18 20:47:11 fetching fields: command: "show field keys"; database: "benchmark"; retention: "autogen"
2020/01/18 20:47:11 found 10 fields
2020/01/18 20:47:11 fetching series: command: "show series "; database: "benchmark"; retention: "autogen"
Found 40000 timeseries to import. Continue? [Y/n] y
40000 / 40000 [-----------------------------------------------------------------------------------------------------------------------------------------------] 100.00% 21 p/s
2020/01/18 21:19:00 Import finished!
2020/01/18 21:19:00 VictoriaMetrics importer stats:
  idle duration: 13m51.461434876s;
  time spent while importing: 17m56.923899847s;
  total samples: 345600000;
  samples/s: 320914.04;
  total bytes: 5.9 GB;
  bytes/s: 5.4 MB;
  import requests: 40001;
2020/01/18 21:19:00 Total time: 31m48.467044016s
```

### Data mapping

vmctl maps Influx data the same way as VictoriaMetrics does, by using the following rules:

* The `influx-database` arg is mapped into the `db` label value unless the `db` tag exists in the Influx line.
* Field names are mapped to time series names prefixed with the {measurement}{separator} value,
  where {separator} equals `_` by default.
  It can be changed with the `--influx-measurement-field-separator` command-line flag.
* Field values are mapped to time series values.
* Tags are mapped to Prometheus labels format as-is.

For example, the following Influx line:
```
foo,tag1=value1,tag2=value2 field1=12,field2=40
```

is converted into the following Prometheus format data points:
```
foo_field1{tag1="value1", tag2="value2"} 12
foo_field2{tag1="value1", tag2="value2"} 40
```

### Configuration

The configuration flags should contain self-explanatory descriptions.

### Filtering

The filtering consists of two parts: by timeseries and by time.

The first step of the application is to select all available timeseries
for the given database and retention. The user may specify an additional filtering
condition via the `--influx-filter-series` flag. For example:
```
./vmctl influx --influx-database benchmark \
  --influx-filter-series "on benchmark from cpu where hostname='host_1703'"
InfluxDB import mode
2020/01/26 14:23:29 Exploring scheme for database "benchmark"
2020/01/26 14:23:29 fetching fields: command: "show field keys"; database: "benchmark"; retention: "autogen"
2020/01/26 14:23:29 found 12 fields
2020/01/26 14:23:29 fetching series: command: "show series on benchmark from cpu where hostname='host_1703'"; database: "benchmark"; retention: "autogen"
Found 10 timeseries to import. Continue? [Y/n]
```
The timeseries select query would be the following:
`fetching series: command: "show series on benchmark from cpu where hostname='host_1703'"; database: "benchmark"; retention: "autogen"`

The second step of filtering is the time filter, which applies when fetching the datapoints from Influx.
Time filtering may be configured with two flags:
* `--influx-filter-time-start`
* `--influx-filter-time-end`

Here's an example of importing timeseries for one day only:
`./vmctl influx --influx-database benchmark --influx-filter-series "where hostname='host_1703'" --influx-filter-time-start "2020-01-01T10:07:00Z" --influx-filter-time-end "2020-01-01T15:07:00Z"`

Please see more about time filtering [here](https://docs.influxdata.com/influxdb/v1.7/query_language/schema_exploration#filter-meta-queries-by-time).

## Migrating data from InfluxDB (2.x)

Migrating data from InfluxDB v2.x is not supported yet ([#32](https://github.com/VictoriaMetrics/vmctl/issues/32)).
You may find a third-party solution for this useful: https://github.com/jonppe/influx_to_victoriametrics.

## Migrating data from Prometheus

`vmctl` supports the `prometheus` mode for migrating data from Prometheus to the VictoriaMetrics time-series database.
Migration is based on reading a Prometheus snapshot, which is basically a hard-link to Prometheus data files.

See `./vmctl prometheus --help` for details and the full list of flags.

To use the migration tool, please specify the path to the Prometheus snapshot `--prom-snapshot` and the VictoriaMetrics address `--vm-addr`.
More about Prometheus snapshots may be found [here](https://www.robustperception.io/taking-snapshots-of-prometheus-data).
The `--vm-addr` flag for a single-node VM is usually equal to `--httpListenAddr`, and for the cluster version
it is equal to the `--httpListenAddr` flag of the VMInsert component. Please note that vmctl performs an initial readiness check for the given address
by probing the `/health` endpoint. For the cluster version it is additionally required to specify the `--vm-account-id` flag.
See more details for the cluster version [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).

As soon as the required flags are provided and all endpoints are accessible, `vmctl` starts the Prometheus snapshot exploration.
Basically, it just fetches all available blocks in the provided snapshot and reads their metadata. It also does initial filtering by time
if the flags `--prom-filter-time-start` or `--prom-filter-time-end` were set. The exploration procedure prints some stats from the read blocks.
Please note that these stats do not take timeseries or samples filtering into account; that will be done during the importing process.

The importing process takes the snapshot blocks revealed by the Explore procedure and processes them one by one,
accumulating timeseries and samples. Please note that `vmctl` relies on responses from Influx at this stage,
so ensure that Explore queries are executed without errors or limits. Please see this
[issue](https://github.com/VictoriaMetrics/vmctl/issues/30) for details.
The data is processed in chunks and then sent to VM.

The importing process example for a local installation of Prometheus
and single-node VictoriaMetrics (`http://localhost:8428`):
```
./vmctl prometheus --prom-snapshot=/path/to/snapshot \
  --vm-concurrency=1 \
  --vm-batch-size=200000 \
  --prom-concurrency=3
Prometheus import mode
Prometheus snapshot stats:
  blocks found: 14;
  blocks skipped: 0;
  min time: 1581288163058 (2020-02-09T22:42:43Z);
  max time: 1582409128139 (2020-02-22T22:05:28Z);
  samples: 32549106;
  series: 27289.
Found 14 blocks to import. Continue? [Y/n] y
14 / 14 [-------------------------------------------------------------------------------------------] 100.00% 0 p/s
2020/02/23 15:50:03 Import finished!
2020/02/23 15:50:03 VictoriaMetrics importer stats:
  idle duration: 6.152953029s;
  time spent while importing: 44.908522491s;
  total samples: 32549106;
  samples/s: 724786.84;
  total bytes: 669.1 MB;
  bytes/s: 14.9 MB;
  import requests: 323;
  import requests retries: 0;
2020/02/23 15:50:03 Total time: 51.077451066s
```

### Data mapping

VictoriaMetrics has a data model very similar to Prometheus and supports [RemoteWrite integration](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage),
so no data changes will be applied.

### Configuration

The configuration flags should contain self-explanatory descriptions.

### Filtering

The filtering consists of two parts: by timeseries and by time.

Filtering by time may be configured via the flags `--prom-filter-time-start` and `--prom-filter-time-end`
in RFC3339 format. This filter is applied twice: to drop blocks out of range and to filter timeseries in blocks with
an overlapping time range.

Example of applying a time filter:
```
./vmctl prometheus --prom-snapshot=/path/to/snapshot \
  --prom-filter-time-start=2020-02-07T00:07:01Z \
  --prom-filter-time-end=2020-02-11T00:07:01Z
Prometheus import mode
Prometheus snapshot stats:
  blocks found: 2;
  blocks skipped: 12;
  min time: 1581288163058 (2020-02-09T22:42:43Z);
  max time: 1581328800000 (2020-02-10T10:00:00Z);
  samples: 1657698;
  series: 3930.
Found 2 blocks to import. Continue? [Y/n] y
```

Please note that the total number of blocks in the provided snapshot is 14, but only 2 of them were in the provided
time range, so the other 12 blocks were marked as `skipped`. The number of samples and series is not taken into account,
since this is a heavy operation and will be done during the import process.

Filtering by timeseries is configured with the following flags:
* `--prom-filter-label` - the label name, e.g. `__name__` or `instance`;
* `--prom-filter-label-value` - the regular expression to filter the label value. By default matches all (`.*`).

For example:
```
./vmctl prometheus --prom-snapshot=/path/to/snapshot \
  --prom-filter-label="__name__" \
  --prom-filter-label-value="promhttp.*" \
  --prom-filter-time-start=2020-02-07T00:07:01Z \
  --prom-filter-time-end=2020-02-11T00:07:01Z
Prometheus import mode
Prometheus snapshot stats:
  blocks found: 2;
  blocks skipped: 12;
  min time: 1581288163058 (2020-02-09T22:42:43Z);
  max time: 1581328800000 (2020-02-10T10:00:00Z);
  samples: 1657698;
  series: 3930.
Found 2 blocks to import. Continue? [Y/n] y
14 / 14 [------------------------------------------------------------------------------------------------------------------------------------------------------] 100.00% ? p/s
2020/02/23 15:51:07 Import finished!
2020/02/23 15:51:07 VictoriaMetrics importer stats:
  idle duration: 0s;
  time spent while importing: 37.415461ms;
  total samples: 10128;
  samples/s: 270690.24;
  total bytes: 195.2 kB;
  bytes/s: 5.2 MB;
  import requests: 2;
  import requests retries: 0;
2020/02/23 15:51:07 Total time: 7.153158218s
```

## Migrating data from Thanos

Thanos uses the same storage engine as Prometheus, and the on-disk data layout should be the same. That means
`vmctl` in `prometheus` mode may be used for Thanos historical data migration as well.
These instructions may vary based on the details of your Thanos configuration.
Please read carefully and verify as you go. We assume you're using Thanos Sidecar on your Prometheus pods,
and that you have a separate Thanos Store installation.

### Current data

1. For now, keep your Thanos Sidecar and Thanos-related Prometheus configuration, but add this to also stream
   metrics to VictoriaMetrics:
   ```
   remote_write:
   - url: http://victoria-metrics:8428/api/v1/write
   ```
2. Make sure VM is running, of course. Now check the logs to make sure that Prometheus is sending and VM is receiving.
   In Prometheus, make sure there are no errors. On the VM side, you should see messages like this:
   ```
   2020-04-27T18:38:46.474Z info VictoriaMetrics/lib/storage/partition.go:207 creating a partition "2020_04" with smallPartsPath="/victoria-metrics-data/data/small/2020_04", bigPartsPath="/victoria-metrics-data/data/big/2020_04"
   2020-04-27T18:38:46.506Z info VictoriaMetrics/lib/storage/partition.go:222 partition "2020_04" has been created
   ```
3. Now just wait. Within two hours, Prometheus should finish its current data file and hand it off to Thanos Store for long-term
   storage.

### Historical data

Let's assume your data is stored on S3 served by minio. You first need to copy that out to a local filesystem,
then import it into VM using `vmctl` in `prometheus` mode.
1. Copy data from minio.
   1. Run the `minio/mc` Docker container.
   1. `mc config host add minio http://minio:9000 accessKey secretKey`, substituting appropriate values for the last 3 items.
   1. `mc cp -r minio/prometheus thanos-data`
1. Import using `vmctl`.
   1. Follow the [instructions](#how-to-build) to compile `vmctl` on your machine.
   1. Use [prometheus](#migrating-data-from-prometheus) mode to import data:
      ```
      vmctl prometheus --prom-snapshot thanos-data --vm-addr http://victoria-metrics:8428
      ```

## Migrating data from VictoriaMetrics

### Native protocol

The [native binary protocol](https://victoriametrics.github.io/#how-to-export-data-in-native-format)
was introduced in the [1.42.0 release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.42.0)
and provides the most efficient way to migrate data between VM instances: single to single, cluster to cluster,
single to cluster and vice versa. Please note that both instances (source and destination) should be of v1.42.0
or higher.

See `./vmctl vm-native --help` for details and the full list of flags.

In this mode `vmctl` acts as a proxy between two VM instances, where time series filtering is done by the "source" (`src`)
and processing is done by the "destination" (`dst`). Because of that, `vmctl` doesn't actually know how much data will be
processed and can't show the progress bar. It will show the current processing speed and the total number of processed bytes:

```
./vmctl vm-native --vm-native-src-addr=http://localhost:8528 \
  --vm-native-dst-addr=http://localhost:8428 \
  --vm-native-filter-match='{job="vmagent"}' \
  --vm-native-filter-time-start='2020-01-01T20:07:00Z'
VictoriaMetrics Native import mode
Initing export pipe from "http://localhost:8528" with filters:
  filter: match[]={job="vmagent"}
Initing import process to "http://localhost:8428":
Total: 336.75 KiB ↖ Speed: 454.46 KiB p/s
2020/10/13 17:04:59 Total time: 952.143376ms
```
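
Conceptually, the proxying boils down to piping the source's export stream into the destination's import endpoint. The sketch below is a minimal illustration of that idea, assuming the documented `/api/v1/export/native` and `/api/v1/import/native` endpoints; vmctl's real implementation adds auth, retries and time filters on top.
```
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	src := "http://localhost:8528"
	dst := "http://localhost:8428"
	// Ask the source to export all series matching the filter...
	match := url.QueryEscape(`{job="vmagent"}`)
	resp, err := http.Get(src + "/api/v1/export/native?match[]=" + match)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// ...and stream the response body straight into the destination importer.
	ir, err := http.Post(dst+"/api/v1/import/native", "application/octet-stream", resp.Body)
	if err != nil {
		panic(err)
	}
	defer ir.Body.Close()
	fmt.Println("import status:", ir.Status)
}
```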

Importing tips:
1. Migrating all the metrics from one VM to another may collide with existing application metrics
   (prefixed with `vm_`) at the destination and lead to confusion when using
   [official Grafana dashboards](https://grafana.com/orgs/victoriametrics/dashboards).
   To avoid such a situation, try to filter out VM process metrics via the `--vm-native-filter-match` flag.
2. Migration is a backfilling process, so it is recommended to read
   the [Backfilling tips](https://github.com/VictoriaMetrics/VictoriaMetrics#backfilling) section.
3. `vmctl` doesn't provide relabeling or other types of label management in this mode.
   Instead, use [relabeling in VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl/issues/4#issuecomment-683424375).

## Tuning

### Influx mode

The flag `--influx-concurrency` controls how many concurrent requests may be sent to InfluxDB while fetching
timeseries. Please set it wisely to avoid overwhelming InfluxDB.

The flag `--influx-chunk-size` controls the max number of datapoints to return in a single chunk from fetch requests.
Please see more details [here](https://docs.influxdata.com/influxdb/v1.7/guides/querying_data/#chunking).
The chunk size is used to control InfluxDB memory usage, so it won't OOM when processing large timeseries with
billions of datapoints.

### Prometheus mode

The flag `--prom-concurrency` controls how many concurrent readers will be reading the blocks in the snapshot.
Since snapshots are just files on disk, it would be hard to overwhelm the system. Please go with a value equal
to the number of free CPU cores.

### VictoriaMetrics importer

The flag `--vm-concurrency` controls the number of concurrent workers that process the input from InfluxDB query results.
Please note that each import request can load up to a single vCPU core on VictoriaMetrics, so try to set it according
to the allocated CPU resources of your VictoriaMetrics installation.

The flag `--vm-batch-size` controls the max number of samples collected before sending the import request.
For example, if `--influx-chunk-size=500` and `--vm-batch-size=2000`, then the importer will process no more
than 4 chunks before sending the request.

### Importer stats

After a successful import, `vmctl` prints some statistics. The important numbers to watch are the following:
- `idle duration` - shows the time the importer spent waiting for data from InfluxDB/Prometheus
  to fill up the `--vm-batch-size` batch size. The value shows the total duration across all workers configured
  via `--vm-concurrency`. A high value may be a sign of too-slow InfluxDB/Prometheus fetches or a too-high
  `--vm-concurrency` value. Try to improve it by increasing the `--<mode>-concurrency` value or
  decreasing the `--vm-concurrency` value.
- `import requests` - shows how many import requests were issued to the VM server.
  An import request is issued once the batch size (`--vm-batch-size`) is full and ready to be sent.
  Please prefer big batch sizes (50k-500k) to improve performance.
- `import requests retries` - shows the number of unsuccessful import requests. A non-zero value may be
  a sign of network issues or of VM being overloaded. See the logs during import for error messages.

### Silent mode

By default `vmctl` waits for confirmation from the user before starting the import. If this
behavior is unwanted and no user interaction is required, pass the `-s` flag to enable "silence" mode:
```
  -s Whether to run in silent mode. If set to true no confirmation prompts will appear. (default: false)
```

### Significant figures

`vmctl` allows limiting the number of [significant figures](https://en.wikipedia.org/wiki/Significant_figures)
before importing. For example, the average value for response size is `102.342305` bytes and it has 9 significant figures.
If you ask a human to pronounce this value, then with high probability the value will be rounded to the first 4 or 5 figures
because the rest aren't really that important to mention. In most cases, such high precision is too much.
Moreover, such values may be just a result of [floating point arithmetic](https://en.wikipedia.org/wiki/Floating-point_arithmetic),
create a [false precision](https://en.wikipedia.org/wiki/False_precision) and result in a bad compression ratio
according to [information theory](https://en.wikipedia.org/wiki/Information_theory).

The `--vm-significant-figures` flag allows limiting the number of significant figures. It has no effect if set
to 0 (the default), but with `--vm-significant-figures=5`, `102.342305` will be rounded to `102.34`. Such a value will
have a much higher compression ratio compared to the previous one and will save some extra disk space after the migration.
The most common case for using this flag is to reduce the number of significant figures for time series storing aggregation
results such as `average`, `rate`, etc.
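
The rounding described above can be sketched in a few lines. This is a minimal illustration under the assumption of plain decimal rounding; the actual vmctl implementation may handle edge cases differently:
```
package main

import (
	"fmt"
	"math"
)

// roundToSignificantFigures rounds v to n significant figures.
func roundToSignificantFigures(v float64, n int) float64 {
	if n <= 0 || v == 0 || math.IsNaN(v) || math.IsInf(v, 0) {
		return v
	}
	// exp is the count of digits before the decimal point.
	exp := math.Ceil(math.Log10(math.Abs(v)))
	scale := math.Pow(10, float64(n)-exp)
	return math.Round(v*scale) / scale
}

func main() {
	fmt.Println(roundToSignificantFigures(102.342305, 5)) // 102.34
}
```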

### Adding extra labels

`vmctl` allows adding extra labels to all imported series. This can be achieved with the flag `--vm-extra-label label=value`.
If multiple labels need to be added, set the flag for each label, for example, `--vm-extra-label label1=value1 --vm-extra-label label2=value2`.
If a timeseries already has a label that is also set via the `--vm-extra-label` flag, the flag has priority and will override the label value from the timeseries.
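
The collision rule can be pictured as a simple map merge where flag-defined labels win (a hypothetical helper for illustration, not vmctl's actual code):
```
package main

import "fmt"

// applyExtraLabels merges extra labels into the series labels;
// flag-defined labels win on collision.
func applyExtraLabels(series, extra map[string]string) map[string]string {
	out := make(map[string]string, len(series)+len(extra))
	for k, v := range series {
		out[k] = v
	}
	for k, v := range extra {
		out[k] = v // --vm-extra-label value overrides the series label
	}
	return out
}

func main() {
	series := map[string]string{"job": "node", "dc": "eu"}
	extra := map[string]string{"dc": "us"} // --vm-extra-label dc=us
	fmt.Println(applyExtraLabels(series, extra)) // map[dc:us job:node]
}
```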

app/vmctl/deployment/Dockerfile (new file): 6 lines

@@ -0,0 +1,6 @@
ARG base_image
FROM $base_image

ENTRYPOINT ["/vmctl-prod"]
ARG src_binary
COPY $src_binary ./vmctl-prod

app/vmctl/flags.go (new file): 284 lines

@@ -0,0 +1,284 @@
package main

import (
	"fmt"

	"github.com/urfave/cli/v2"
)

const (
	globalSilent = "s"
)

var (
	globalFlags = []cli.Flag{
		&cli.BoolFlag{
			Name:  globalSilent,
			Value: false,
			Usage: "Whether to run in silent mode. If set to true no confirmation prompts will appear.",
		},
	}
)

const (
	vmAddr               = "vm-addr"
	vmUser               = "vm-user"
	vmPassword           = "vm-password"
	vmAccountID          = "vm-account-id"
	vmConcurrency        = "vm-concurrency"
	vmCompress           = "vm-compress"
	vmBatchSize          = "vm-batch-size"
	vmSignificantFigures = "vm-significant-figures"
	vmExtraLabel         = "vm-extra-label"
)

var (
	vmFlags = []cli.Flag{
		&cli.StringFlag{
			Name:  vmAddr,
			Value: "http://localhost:8428",
			Usage: "VictoriaMetrics address to perform import requests. \n" +
				"Should be the same as --httpListenAddr value for single-node version or VMInsert component. \n" +
				"Please note, that `vmctl` performs initial readiness check for the given address by checking `/health` endpoint.",
		},
		&cli.StringFlag{
			Name:    vmUser,
			Usage:   "VictoriaMetrics username for basic auth",
			EnvVars: []string{"VM_USERNAME"},
		},
		&cli.StringFlag{
			Name:    vmPassword,
			Usage:   "VictoriaMetrics password for basic auth",
			EnvVars: []string{"VM_PASSWORD"},
		},
		&cli.StringFlag{
			Name: vmAccountID,
			Usage: "AccountID is an arbitrary 32-bit integer identifying namespace for data ingestion (aka tenant). \n" +
				"It is possible to set it as accountID:projectID, where projectID is also arbitrary 32-bit integer. \n" +
				"If projectID isn't set, then it equals to 0",
		},
		&cli.UintFlag{
			Name:  vmConcurrency,
			Usage: "Number of workers concurrently performing import requests to VM",
			Value: 2,
		},
		&cli.BoolFlag{
			Name:  vmCompress,
			Value: true,
			Usage: "Whether to apply gzip compression to import requests",
		},
		&cli.IntFlag{
			Name:  vmBatchSize,
			Value: 200e3,
			Usage: "How many samples importer collects before sending the import request to VM",
		},
		&cli.IntFlag{
			Name:  vmSignificantFigures,
			Value: 0,
			Usage: "The number of significant figures to leave in metric values before importing. " +
				"See https://en.wikipedia.org/wiki/Significant_figures. Zero value saves all the significant figures. " +
				"This option may be used for increasing on-disk compression level for the stored metrics",
		},
		&cli.StringSliceFlag{
			Name:  vmExtraLabel,
			Value: nil,
			Usage: "Extra labels, that will be added to imported timeseries. In case of collision, label value defined by flag " +
				"will have priority. Flag can be set multiple times, to add few additional labels.",
		},
	}
)

const (
	influxAddr                      = "influx-addr"
	influxUser                      = "influx-user"
	influxPassword                  = "influx-password"
	influxDB                        = "influx-database"
	influxRetention                 = "influx-retention-policy"
	influxChunkSize                 = "influx-chunk-size"
	influxConcurrency               = "influx-concurrency"
	influxFilterSeries              = "influx-filter-series"
	influxFilterTimeStart           = "influx-filter-time-start"
	influxFilterTimeEnd             = "influx-filter-time-end"
	influxMeasurementFieldSeparator = "influx-measurement-field-separator"
)

var (
	influxFlags = []cli.Flag{
		&cli.StringFlag{
			Name:  influxAddr,
			Value: "http://localhost:8086",
			Usage: "Influx server addr",
		},
		&cli.StringFlag{
			Name:    influxUser,
			Usage:   "Influx user",
			EnvVars: []string{"INFLUX_USERNAME"},
		},
		&cli.StringFlag{
			Name:    influxPassword,
			Usage:   "Influx user password",
			EnvVars: []string{"INFLUX_PASSWORD"},
		},
		&cli.StringFlag{
			Name:     influxDB,
			Usage:    "Influx database",
			Required: true,
		},
		&cli.StringFlag{
			Name:  influxRetention,
			Usage: "Influx retention policy",
			Value: "autogen",
		},
		&cli.IntFlag{
			Name:  influxChunkSize,
			Usage: "The chunkSize defines max amount of series to be returned in one chunk",
			Value: 10e3,
		},
		&cli.IntFlag{
			Name:  influxConcurrency,
			Usage: "Number of concurrently running fetch queries to InfluxDB",
			Value: 1,
		},
		&cli.StringFlag{
			Name: influxFilterSeries,
			Usage: "Influx filter expression to select series. E.g. \"from cpu where arch='x86' AND hostname='host_2753'\".\n" +
				"See for details https://docs.influxdata.com/influxdb/v1.7/query_language/schema_exploration#show-series",
		},
		&cli.StringFlag{
			Name:  influxFilterTimeStart,
			Usage: "The time filter to select timeseries with timestamp equal or higher than provided value. E.g. '2020-01-01T20:07:00Z'",
		},
		&cli.StringFlag{
			Name:  influxFilterTimeEnd,
			Usage: "The time filter to select timeseries with timestamp equal or lower than provided value. E.g. '2020-01-01T20:07:00Z'",
		},
		&cli.StringFlag{
			Name:  influxMeasurementFieldSeparator,
			Usage: "The {separator} symbol used to concatenate {measurement} and {field} names into series name {measurement}{separator}{field}.",
			Value: "_",
		},
	}
)
const (
	promSnapshot         = "prom-snapshot"
	promConcurrency      = "prom-concurrency"
	promFilterTimeStart  = "prom-filter-time-start"
	promFilterTimeEnd    = "prom-filter-time-end"
	promFilterLabel      = "prom-filter-label"
	promFilterLabelValue = "prom-filter-label-value"
)

var (
	promFlags = []cli.Flag{
		&cli.StringFlag{
			Name:     promSnapshot,
			Usage:    "Path to Prometheus snapshot. Please see for details https://www.robustperception.io/taking-snapshots-of-prometheus-data",
			Required: true,
		},
		&cli.IntFlag{
			Name:  promConcurrency,
			Usage: "Number of concurrently running snapshot readers",
			Value: 1,
		},
		&cli.StringFlag{
			Name:  promFilterTimeStart,
			Usage: "The time filter in RFC3339 format to select timeseries with timestamp equal or higher than provided value. E.g. '2020-01-01T20:07:00Z'",
		},
		&cli.StringFlag{
			Name:  promFilterTimeEnd,
			Usage: "The time filter in RFC3339 format to select timeseries with timestamp equal or lower than provided value. E.g. '2020-01-01T20:07:00Z'",
		},
		&cli.StringFlag{
			Name:  promFilterLabel,
			Usage: "Prometheus label name to filter timeseries by. E.g. '__name__' will filter timeseries by name.",
		},
		&cli.StringFlag{
			Name:  promFilterLabelValue,
			Usage: fmt.Sprintf("Prometheus regular expression to filter label from %q flag.", promFilterLabel),
			Value: ".*",
		},
	}
)

const (
	vmNativeFilterMatch     = "vm-native-filter-match"
	vmNativeFilterTimeStart = "vm-native-filter-time-start"
	vmNativeFilterTimeEnd   = "vm-native-filter-time-end"

	vmNativeSrcAddr     = "vm-native-src-addr"
	vmNativeSrcUser     = "vm-native-src-user"
	vmNativeSrcPassword = "vm-native-src-password"

	vmNativeDstAddr     = "vm-native-dst-addr"
	vmNativeDstUser     = "vm-native-dst-user"
	vmNativeDstPassword = "vm-native-dst-password"
)

var (
	vmNativeFlags = []cli.Flag{
		&cli.StringFlag{
			Name: vmNativeFilterMatch,
			Usage: "Time series selector to match series for export. For example, select {instance!=\"localhost\"} will " +
				"match all series with \"instance\" label different to \"localhost\".\n" +
				" See more details here https://github.com/VictoriaMetrics/VictoriaMetrics#how-to-export-data-in-native-format",
			Value: `{__name__!=""}`,
		},
		&cli.StringFlag{
			Name:  vmNativeFilterTimeStart,
			Usage: "The time filter may contain either unix timestamp in seconds or RFC3339 values. E.g. '2020-01-01T20:07:00Z'",
		},
		&cli.StringFlag{
			Name:  vmNativeFilterTimeEnd,
			Usage: "The time filter may contain either unix timestamp in seconds or RFC3339 values. E.g. '2020-01-01T20:07:00Z'",
		},
		&cli.StringFlag{
			Name: vmNativeSrcAddr,
			Usage: "VictoriaMetrics address to perform export from. \n" +
				" Should be the same as --httpListenAddr value for single-node version or VMSelect component." +
				" If exporting from cluster version - include the tenant token in address.",
			Required: true,
		},
		&cli.StringFlag{
			Name:    vmNativeSrcUser,
			Usage:   "VictoriaMetrics username for basic auth",
			EnvVars: []string{"VM_NATIVE_SRC_USERNAME"},
		},
		&cli.StringFlag{
			Name:    vmNativeSrcPassword,
			Usage:   "VictoriaMetrics password for basic auth",
			EnvVars: []string{"VM_NATIVE_SRC_PASSWORD"},
		},
		&cli.StringFlag{
			Name: vmNativeDstAddr,
			Usage: "VictoriaMetrics address to perform import to. \n" +
				" Should be the same as --httpListenAddr value for single-node version or VMInsert component." +
				" If importing into cluster version - include the tenant token in address.",
			Required: true,
		},
		&cli.StringFlag{
			Name:    vmNativeDstUser,
			Usage:   "VictoriaMetrics username for basic auth",
			EnvVars: []string{"VM_NATIVE_DST_USERNAME"},
		},
		&cli.StringFlag{
			Name:    vmNativeDstPassword,
			Usage:   "VictoriaMetrics password for basic auth",
			EnvVars: []string{"VM_NATIVE_DST_PASSWORD"},
		},
		&cli.StringSliceFlag{
			Name:  vmExtraLabel,
			Value: nil,
			Usage: "Extra labels, that will be added to imported timeseries. In case of collision, label value defined by flag " +
				"will have priority. Flag can be set multiple times, to add few additional labels.",
		},
	}
)

func mergeFlags(flags ...[]cli.Flag) []cli.Flag {
	var result []cli.Flag
	for _, f := range flags {
		result = append(result, f...)
	}
	return result
}
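
// Hypothetical usage sketch (an assumption for illustration - the actual
// wiring lives in main.go, which is not part of this excerpt): the flag
// groups above are meant to be merged per subcommand, e.g. for `influx`:
//
//	var influxCommand = &cli.Command{
//		Name:  "influx",
//		Usage: "Migrate timeseries from InfluxDB",
//		Flags: mergeFlags(globalFlags, influxFlags, vmFlags),
//	}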

app/vmctl/influx.go (new file): 144 lines

@@ -0,0 +1,144 @@
package main

import (
	"fmt"
	"io"
	"log"
	"sync"

	"github.com/cheggaaa/pb/v3"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
)

type influxProcessor struct {
	ic        *influx.Client
	im        *vm.Importer
	cc        int
	separator string
}

func newInfluxProcessor(ic *influx.Client, im *vm.Importer, cc int, separator string) *influxProcessor {
	if cc < 1 {
		cc = 1
	}
	return &influxProcessor{
		ic:        ic,
		im:        im,
		cc:        cc,
		separator: separator,
	}
}

func (ip *influxProcessor) run(silent bool) error {
	series, err := ip.ic.Explore()
	if err != nil {
		return fmt.Errorf("explore query failed: %s", err)
	}
	if len(series) < 1 {
		return fmt.Errorf("found no timeseries to import")
	}

	question := fmt.Sprintf("Found %d timeseries to import. Continue?", len(series))
	if !silent && !prompt(question) {
		return nil
	}

	bar := pb.StartNew(len(series))
	seriesCh := make(chan *influx.Series)
	errCh := make(chan error)
	ip.im.ResetStats()

	var wg sync.WaitGroup
	wg.Add(ip.cc)
	for i := 0; i < ip.cc; i++ {
		go func() {
			defer wg.Done()
			for s := range seriesCh {
				if err := ip.do(s); err != nil {
					errCh <- fmt.Errorf("request failed for %q.%q: %s", s.Measurement, s.Field, err)
					return
				}
				bar.Increment()
			}
		}()
	}

	// any error breaks the import
	for _, s := range series {
		select {
		case infErr := <-errCh:
			return fmt.Errorf("influx error: %s", infErr)
		case vmErr := <-ip.im.Errors():
			return fmt.Errorf("Import process failed: \n%s", wrapErr(vmErr))
		case seriesCh <- s:
		}
	}

	close(seriesCh)
	wg.Wait()
	ip.im.Close()
	// drain import errors channel
	for vmErr := range ip.im.Errors() {
		return fmt.Errorf("Import process failed: \n%s", wrapErr(vmErr))
	}
	bar.Finish()
	log.Println("Import finished!")
	log.Print(ip.im.Stats())
	return nil
}

const dbLabel = "db"

func (ip *influxProcessor) do(s *influx.Series) error {
	cr, err := ip.ic.FetchDataPoints(s)
	if err != nil {
		return fmt.Errorf("failed to fetch datapoints: %s", err)
	}
	defer cr.Close()
	var name string
	if s.Measurement != "" {
		name = fmt.Sprintf("%s%s%s", s.Measurement, ip.separator, s.Field)
	} else {
		name = s.Field
	}

	labels := make([]vm.LabelPair, len(s.LabelPairs))
	var containsDBLabel bool
	for i, lp := range s.LabelPairs {
		if lp.Name == dbLabel {
			containsDBLabel = true
			break
		}
		labels[i] = vm.LabelPair{
			Name:  lp.Name,
			Value: lp.Value,
		}
	}
	if !containsDBLabel {
		labels = append(labels, vm.LabelPair{
			Name:  dbLabel,
			Value: ip.ic.Database(),
		})
	}

	for {
		time, values, err := cr.Next()
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		// skip empty results
		if len(time) < 1 {
			continue
		}
		ip.im.Input() <- &vm.TimeSeries{
			Name:       name,
			LabelPairs: labels,
			Timestamps: time,
			Values:     values,
		}
	}
}

app/vmctl/influx/influx.go (new file): 360 lines

@@ -0,0 +1,360 @@
package influx

import (
	"fmt"
	"io"
	"log"
	"strings"
	"time"

	influx "github.com/influxdata/influxdb/client/v2"
)

// Client represents a wrapper over
// influx HTTP client
type Client struct {
	influx.Client

	database  string
	retention string
	chunkSize int

	filterSeries string
	filterTime   string
}

// Config contains fields required
// for Client configuration
type Config struct {
	Addr      string
	Username  string
	Password  string
	Database  string
	Retention string
	ChunkSize int

	Filter Filter
}

// Filter contains configuration for filtering
// the timeseries
type Filter struct {
	Series    string
	TimeStart string
	TimeEnd   string
}

// Series holds the time series
type Series struct {
	Measurement string
	Field       string
	LabelPairs  []LabelPair
}

var valueEscaper = strings.NewReplacer(`\`, `\\`, `'`, `\'`)

func (s Series) fetchQuery(timeFilter string) string {
	f := &strings.Builder{}
	fmt.Fprintf(f, "select %q from %q", s.Field, s.Measurement)
	if len(s.LabelPairs) > 0 || len(timeFilter) > 0 {
		f.WriteString(" where")
	}
	for i, pair := range s.LabelPairs {
		pairV := valueEscaper.Replace(pair.Value)
		fmt.Fprintf(f, " %q='%s'", pair.Name, pairV)
		if i != len(s.LabelPairs)-1 {
			f.WriteString(" and")
		}
	}
	if len(timeFilter) > 0 {
		if len(s.LabelPairs) > 0 {
			f.WriteString(" and")
		}
		fmt.Fprintf(f, " %s", timeFilter)
	}
	return f.String()
}
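
// Example (illustrative, added for clarity): for
//	s := Series{
//		Measurement: "cpu",
//		Field:       "value",
//		LabelPairs:  []LabelPair{{Name: "hostname", Value: "host_1703"}},
//	}
// and timeFilter equal to "time >= '2020-01-01T20:07:00Z'", fetchQuery returns:
//	select "value" from "cpu" where "hostname"='host_1703' and time >= '2020-01-01T20:07:00Z'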

// LabelPair is the key-value record
// of time series label
type LabelPair struct {
	Name  string
	Value string
}

// NewClient creates and returns influx client
// configured with passed Config
func NewClient(cfg Config) (*Client, error) {
	c := influx.HTTPConfig{
		Addr:               cfg.Addr,
		Username:           cfg.Username,
		Password:           cfg.Password,
		InsecureSkipVerify: true,
	}
	hc, err := influx.NewHTTPClient(c)
	if err != nil {
		return nil, fmt.Errorf("failed to establish conn: %s", err)
	}
	if _, _, err := hc.Ping(time.Second); err != nil {
		return nil, fmt.Errorf("ping failed: %s", err)
	}

	chunkSize := cfg.ChunkSize
	if chunkSize < 1 {
		chunkSize = 10e3
	}

	client := &Client{
		Client:       hc,
		database:     cfg.Database,
		retention:    cfg.Retention,
		chunkSize:    chunkSize,
		filterTime:   timeFilter(cfg.Filter.TimeStart, cfg.Filter.TimeEnd),
		filterSeries: cfg.Filter.Series,
	}
	return client, nil
}

func (c Client) Database() string {
	return c.database
}

func timeFilter(start, end string) string {
	if start == "" && end == "" {
		return ""
	}
	var tf string
	if start != "" {
		tf = fmt.Sprintf("time >= '%s'", start)
	}
	if end != "" {
		if tf != "" {
			tf += " and "
		}
		tf += fmt.Sprintf("time <= '%s'", end)
	}
	return tf
}
|
||||||
|
// Explore checks the existing data schema in influx
|
||||||
|
// by checking available fields and series,
|
||||||
|
// which unique combination represents all possible
|
||||||
|
// time series existing in database.
|
||||||
|
// The explore required to reduce the load on influx
|
||||||
|
// by querying field of the exact time series at once,
|
||||||
|
// instead of fetching all of the values over and over.
|
||||||
|
//
|
||||||
|
// May contain non-existing time series.
|
||||||
|
func (c *Client) Explore() ([]*Series, error) {
|
||||||
|
log.Printf("Exploring scheme for database %q", c.database)
|
||||||
|
mFields, err := c.fieldsByMeasurement()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get field keys: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
series, err := c.getSeries()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get series: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var iSeries []*Series
|
||||||
|
for _, s := range series {
|
||||||
|
fields, ok := mFields[s.Measurement]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("can't find field keys for measurement %q", s.Measurement)
|
||||||
|
}
|
||||||
|
for _, field := range fields {
|
||||||
|
is := &Series{
|
||||||
|
Measurement: s.Measurement,
|
||||||
|
Field: field,
|
||||||
|
LabelPairs: s.LabelPairs,
|
||||||
|
}
|
||||||
|
iSeries = append(iSeries, is)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iSeries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChunkedResponse is a wrapper over influx.ChunkedResponse.
|
||||||
|
// Used for better memory usage control while iterating
|
||||||
|
// over huge time series.
|
||||||
|
type ChunkedResponse struct {
|
||||||
|
cr *influx.ChunkedResponse
|
||||||
|
iq influx.Query
|
||||||
|
field string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cr *ChunkedResponse) Close() error {
|
||||||
|
return cr.cr.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Next reads the next part/chunk of time series.
|
||||||
|
// Returns io.EOF when time series was read entirely.
|
||||||
|
func (cr *ChunkedResponse) Next() ([]int64, []float64, error) {
|
||||||
|
resp, err := cr.cr.NextResponse()
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if resp.Error() != nil {
|
||||||
|
return nil, nil, fmt.Errorf("response error for %s: %s", cr.iq.Command, resp.Error())
|
||||||
|
}
|
||||||
|
if len(resp.Results) != 1 {
|
||||||
|
return nil, nil, fmt.Errorf("unexpected number of results in response: %d", len(resp.Results))
|
||||||
|
}
|
||||||
|
results, err := parseResult(resp.Results[0])
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if len(results) < 1 {
|
||||||
|
return nil, nil, nil
|
||||||
|
}
|
||||||
|
r := results[0]
|
||||||
|
|
||||||
|
const key = "time"
|
||||||
|
timestamps, ok := r.values[key]
|
||||||
|
if !ok {
|
||||||
|
return nil, nil, fmt.Errorf("response doesn't contain field %q", key)
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldValues, ok := r.values[cr.field]
|
||||||
|
if !ok {
|
||||||
|
return nil, nil, fmt.Errorf("response doesn't contain filed %q", cr.field)
|
||||||
|
}
|
||||||
|
values := make([]float64, len(fieldValues))
|
||||||
|
for i, fv := range fieldValues {
|
||||||
|
v, err := toFloat64(fv)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("failed to convert value %q.%v to float64: %s",
|
||||||
|
cr.field, v, err)
|
||||||
|
}
|
||||||
|
values[i] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
ts := make([]int64, len(results[0].values[key]))
|
||||||
|
for i, v := range timestamps {
|
||||||
|
t, err := parseDate(v.(string))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
ts[i] = t
|
||||||
|
}
|
||||||
|
return ts, values, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FetchDataPoints performs SELECT request to fetch
|
||||||
|
// datapoints for particular field.
|
||||||
|
func (c *Client) FetchDataPoints(s *Series) (*ChunkedResponse, error) {
|
||||||
|
iq := influx.Query{
|
||||||
|
Command: s.fetchQuery(c.filterTime),
|
||||||
|
Database: c.database,
|
||||||
|
RetentionPolicy: c.retention,
|
||||||
|
Chunked: true,
|
||||||
|
ChunkSize: 1e4,
|
||||||
|
}
|
||||||
|
cr, err := c.QueryAsChunk(iq)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("query %q err: %s", iq.Command, err)
|
||||||
|
}
|
||||||
|
return &ChunkedResponse{cr, iq, s.Field}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) fieldsByMeasurement() (map[string][]string, error) {
|
||||||
|
q := influx.Query{
|
||||||
|
Command: "show field keys",
|
||||||
|
Database: c.database,
|
||||||
|
RetentionPolicy: c.retention,
|
||||||
|
}
|
||||||
|
log.Printf("fetching fields: %s", stringify(q))
|
||||||
|
qValues, err := c.do(q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error while executing query %q: %s", q.Command, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var total int
|
||||||
|
var skipped int
|
||||||
|
const fKey = "fieldKey"
|
||||||
|
const fType = "fieldType"
|
||||||
|
result := make(map[string][]string, len(qValues))
|
||||||
|
for _, qv := range qValues {
|
||||||
|
types := qv.values[fType]
|
||||||
|
fields := qv.values[fKey]
|
||||||
|
values := make([]string, 0)
|
||||||
|
for key, field := range fields {
|
||||||
|
if types[key].(string) == "string" {
|
||||||
|
skipped++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
values = append(values, field.(string))
|
||||||
|
total++
|
||||||
|
}
|
||||||
|
result[qv.name] = values
|
||||||
|
}
|
||||||
|
|
||||||
|
if skipped > 0 {
|
||||||
|
log.Printf("found %d fields; skipped %d non-numeric fields", total, skipped)
|
||||||
|
} else {
|
||||||
|
log.Printf("found %d fields", total)
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) getSeries() ([]*Series, error) {
|
||||||
|
com := "show series"
|
||||||
|
if c.filterSeries != "" {
|
||||||
|
com = fmt.Sprintf("%s %s", com, c.filterSeries)
|
||||||
|
}
|
||||||
|
q := influx.Query{
|
||||||
|
Command: com,
|
||||||
|
Database: c.database,
|
||||||
|
RetentionPolicy: c.retention,
|
||||||
|
Chunked: true,
|
||||||
|
ChunkSize: c.chunkSize,
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("fetching series: %s", stringify(q))
|
||||||
|
cr, err := c.QueryAsChunk(q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error while executing query %q: %s", q.Command, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
const key = "key"
|
||||||
|
var result []*Series
|
||||||
|
for {
|
||||||
|
resp, err := cr.NextResponse()
|
||||||
|
if err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if resp.Error() != nil {
|
||||||
|
return nil, fmt.Errorf("response error for query %q: %s", q.Command, resp.Error())
|
||||||
|
}
|
||||||
|
qValues, err := parseResult(resp.Results[0])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, qv := range qValues {
|
||||||
|
for _, v := range qv.values[key] {
|
||||||
|
s := &Series{}
|
||||||
|
if err := s.unmarshal(v.(string)); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
result = append(result, s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
log.Printf("found %d series", len(result))
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) do(q influx.Query) ([]queryValues, error) {
|
||||||
|
res, err := c.Query(q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("query %q err: %s", q.Command, err)
|
||||||
|
}
|
||||||
|
if len(res.Results) < 1 {
|
||||||
|
return nil, fmt.Errorf("exploration query %q returned 0 results", q.Command)
|
||||||
|
}
|
||||||
|
return parseResult(res.Results[0])
|
||||||
|
}
|
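For orientation, a minimal usage sketch of the client above (not part of the diff; the address and database name are illustrative, and the real wiring lives in vmctl's influx processor shown earlier):

	cfg := influx.Config{
		Addr:     "http://localhost:8086", // assumed InfluxDB address
		Database: "telegraf",              // assumed database name
	}
	client, err := influx.NewClient(cfg)
	if err != nil {
		log.Fatalf("failed to create client: %s", err)
	}
	// Explore returns one Series per measurement+field combination.
	series, err := client.Explore()
	if err != nil {
		log.Fatalf("explore failed: %s", err)
	}
	for _, s := range series {
		cr, err := client.FetchDataPoints(s)
		if err != nil {
			log.Fatalf("fetch failed: %s", err)
		}
		for {
			timestamps, values, err := cr.Next()
			if err == io.EOF {
				break // the series was read entirely
			}
			if err != nil {
				log.Fatalf("next failed: %s", err)
			}
			_, _ = timestamps, values // hand the chunk off to the importer here
		}
		_ = cr.Close()
	}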
127 app/vmctl/influx/influx_test.go Normal file
@ -0,0 +1,127 @@
package influx

import "testing"

func TestFetchQuery(t *testing.T) {
	testCases := []struct {
		s          Series
		timeFilter string
		expected   string
	}{
		{
			s: Series{
				Measurement: "cpu",
				Field:       "value",
				LabelPairs: []LabelPair{
					{
						Name:  "foo",
						Value: "bar",
					},
				},
			},
			expected: `select "value" from "cpu" where "foo"='bar'`,
		},
		{
			s: Series{
				Measurement: "cpu",
				Field:       "value",
				LabelPairs: []LabelPair{
					{
						Name:  "foo",
						Value: "bar",
					},
					{
						Name:  "baz",
						Value: "qux",
					},
				},
			},
			expected: `select "value" from "cpu" where "foo"='bar' and "baz"='qux'`,
		},
		{
			s: Series{
				Measurement: "cpu",
				Field:       "value",
				LabelPairs: []LabelPair{
					{
						Name:  "foo",
						Value: "b'ar",
					},
				},
			},
			timeFilter: "time >= now()",
			expected:   `select "value" from "cpu" where "foo"='b\'ar' and time >= now()`,
		},
		{
			s: Series{
				Measurement: "cpu",
				Field:       "value",
				LabelPairs: []LabelPair{
					{
						Name:  "name",
						Value: `dev-mapper-centos\x2dswap.swap`,
					},
					{
						Name:  "state",
						Value: "dev-mapp'er-c'en'tos",
					},
				},
			},
			timeFilter: "time >= now()",
			expected:   `select "value" from "cpu" where "name"='dev-mapper-centos\\x2dswap.swap' and "state"='dev-mapp\'er-c\'en\'tos' and time >= now()`,
		},
		{
			s: Series{
				Measurement: "cpu",
				Field:       "value",
			},
			timeFilter: "time >= now()",
			expected:   `select "value" from "cpu" where time >= now()`,
		},
		{
			s: Series{
				Measurement: "cpu",
				Field:       "value",
			},
			expected: `select "value" from "cpu"`,
		},
	}

	for _, tc := range testCases {
		query := tc.s.fetchQuery(tc.timeFilter)
		if query != tc.expected {
			t.Fatalf("got: \n%s;\nexpected: \n%s", query, tc.expected)
		}
	}
}

func TestTimeFilter(t *testing.T) {
	testCases := []struct {
		start    string
		end      string
		expected string
	}{
		{
			start:    "2020-01-01T20:07:00Z",
			end:      "2020-01-01T21:07:00Z",
			expected: "time >= '2020-01-01T20:07:00Z' and time <= '2020-01-01T21:07:00Z'",
		},
		{
			expected: "",
		},
		{
			start:    "2020-01-01T20:07:00Z",
			expected: "time >= '2020-01-01T20:07:00Z'",
		},
		{
			end:      "2020-01-01T21:07:00Z",
			expected: "time <= '2020-01-01T21:07:00Z'",
		},
	}
	for _, tc := range testCases {
		f := timeFilter(tc.start, tc.end)
		if f != tc.expected {
			t.Fatalf("got: \n%q;\nexpected: \n%q", f, tc.expected)
		}
	}
}
191 app/vmctl/influx/parser.go Normal file
@ -0,0 +1,191 @@
package influx

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
	"time"

	influx "github.com/influxdata/influxdb/client/v2"
)

type queryValues struct {
	name   string
	values map[string][]interface{}
}

func parseResult(r influx.Result) ([]queryValues, error) {
	if len(r.Err) > 0 {
		return nil, fmt.Errorf("result error: %s", r.Err)
	}
	qValues := make([]queryValues, len(r.Series))
	for i, row := range r.Series {
		values := make(map[string][]interface{}, len(row.Values))
		for _, value := range row.Values {
			for idx, v := range value {
				key := row.Columns[idx]
				values[key] = append(values[key], v)
			}
		}
		qValues[i] = queryValues{
			name:   row.Name,
			values: values,
		}
	}
	return qValues, nil
}

func toFloat64(v interface{}) (float64, error) {
	switch i := v.(type) {
	case json.Number:
		return i.Float64()
	case float64:
		return i, nil
	case float32:
		return float64(i), nil
	case int64:
		return float64(i), nil
	case int32:
		return float64(i), nil
	case int:
		return float64(i), nil
	case uint64:
		return float64(i), nil
	case uint32:
		return float64(i), nil
	case uint:
		return float64(i), nil
	case string:
		return strconv.ParseFloat(i, 64)
	default:
		return 0, fmt.Errorf("unexpected value type %v", i)
	}
}

func parseDate(dateStr string) (int64, error) {
	startTime, err := time.Parse(time.RFC3339, dateStr)
	if err != nil {
		return 0, fmt.Errorf("cannot parse %q: %s", dateStr, err)
	}
	return startTime.UnixNano() / 1e6, nil
}

func stringify(q influx.Query) string {
	return fmt.Sprintf("command: %q; database: %q; retention: %q",
		q.Command, q.Database, q.RetentionPolicy)
}

func (s *Series) unmarshal(v string) error {
	noEscapeChars := strings.IndexByte(v, '\\') < 0
	n := nextUnescapedChar(v, ',', noEscapeChars)
	if n < 0 {
		s.Measurement = unescapeTagValue(v, noEscapeChars)
		return nil
	}
	s.Measurement = unescapeTagValue(v[:n], noEscapeChars)
	var err error
	s.LabelPairs, err = unmarshalTags(v[n+1:], noEscapeChars)
	if err != nil {
		return fmt.Errorf("failed to unmarshal tags: %s", err)
	}
	return nil
}

func unmarshalTags(s string, noEscapeChars bool) ([]LabelPair, error) {
	var result []LabelPair
	for {
		lp := LabelPair{}
		n := nextUnescapedChar(s, ',', noEscapeChars)
		if n < 0 {
			if err := lp.unmarshal(s, noEscapeChars); err != nil {
				return nil, err
			}
			if len(lp.Name) == 0 || len(lp.Value) == 0 {
				return nil, nil
			}
			result = append(result, lp)
			return result, nil
		}
		if err := lp.unmarshal(s[:n], noEscapeChars); err != nil {
			return nil, err
		}
		s = s[n+1:]
		if len(lp.Name) == 0 || len(lp.Value) == 0 {
			continue
		}
		result = append(result, lp)
	}
}

func (lp *LabelPair) unmarshal(s string, noEscapeChars bool) error {
	n := nextUnescapedChar(s, '=', noEscapeChars)
	if n < 0 {
		return fmt.Errorf("missing tag value for %q", s)
	}
	lp.Name = unescapeTagValue(s[:n], noEscapeChars)
	lp.Value = unescapeTagValue(s[n+1:], noEscapeChars)
	return nil
}

func unescapeTagValue(s string, noEscapeChars bool) string {
	if noEscapeChars {
		// Fast path - no escape chars.
		return s
	}
	n := strings.IndexByte(s, '\\')
	if n < 0 {
		return s
	}

	// Slow path. Remove escape chars.
	dst := make([]byte, 0, len(s))
	for {
		dst = append(dst, s[:n]...)
		s = s[n+1:]
		if len(s) == 0 {
			return string(append(dst, '\\'))
		}
		ch := s[0]
		if ch != ' ' && ch != ',' && ch != '=' && ch != '\\' {
			dst = append(dst, '\\')
		}
		dst = append(dst, ch)
		s = s[1:]
		n = strings.IndexByte(s, '\\')
		if n < 0 {
			return string(append(dst, s...))
		}
	}
}

func nextUnescapedChar(s string, ch byte, noEscapeChars bool) int {
	if noEscapeChars {
		// Fast path: just search for ch in s, since s has no escape chars.
		return strings.IndexByte(s, ch)
	}

	sOrig := s
again:
	n := strings.IndexByte(s, ch)
	if n < 0 {
		return -1
	}
	if n == 0 {
		return len(sOrig) - len(s) + n
	}
	if s[n-1] != '\\' {
		return len(sOrig) - len(s) + n
	}
	nOrig := n
	slashes := 0
	for n > 0 && s[n-1] == '\\' {
		slashes++
		n--
	}
	if slashes&1 == 0 {
		return len(sOrig) - len(s) + nOrig
	}
	s = s[nOrig+1:]
	goto again
}
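The escape-aware scan in nextUnescapedChar is the subtle part of this parser: a separator only counts if it is preceded by an even number of backslashes. A couple of hand-checked examples (a sketch, not part of the diff):

	// In `a\,b,c` the comma at index 2 is preceded by one backslash
	// (odd count), so it is escaped; the first unescaped comma is at index 4.
	fmt.Println(nextUnescapedChar(`a\,b,c`, ',', false)) // prints 4

	// In `a\\,b` the comma at index 3 is preceded by two backslashes
	// (even count, i.e. an escaped backslash), so the comma is unescaped.
	fmt.Println(nextUnescapedChar(`a\\,b`, ',', false)) // prints 3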
60 app/vmctl/influx/parser_test.go Normal file
@ -0,0 +1,60 @@
package influx

import (
	"reflect"
	"testing"
)

func TestSeries_Unmarshal(t *testing.T) {
	tag := func(name, value string) LabelPair {
		return LabelPair{
			Name:  name,
			Value: value,
		}
	}
	series := func(measurement string, lp ...LabelPair) Series {
		return Series{
			Measurement: measurement,
			LabelPairs:  lp,
		}
	}
	testCases := []struct {
		got  string
		want Series
	}{
		{
			got:  "cpu",
			want: series("cpu"),
		},
		{
			got:  "cpu,host=localhost",
			want: series("cpu", tag("host", "localhost")),
		},
		{
			got:  "cpu,host=localhost,instance=instance",
			want: series("cpu", tag("host", "localhost"), tag("instance", "instance")),
		},
		{
			got:  `fo\,bar\=baz,x\=\b=\\a\,\=\q\ `,
			want: series("fo,bar=baz", tag(`x=\b`, `\a,=\q `)),
		},
		{
			got:  "cpu,host=192.168.0.1,instance=fe80::fdc8:5e36:c2c6:baac%utun1",
			want: series("cpu", tag("host", "192.168.0.1"), tag("instance", "fe80::fdc8:5e36:c2c6:baac%utun1")),
		},
		{
			got: `cpu,db=db1,host=localhost,server=host\=localhost\ user\=user\ `,
			want: series("cpu", tag("db", "db1"),
				tag("host", "localhost"), tag("server", "host=localhost user=user ")),
		},
	}
	for _, tc := range testCases {
		s := Series{}
		if err := s.unmarshal(tc.got); err != nil {
			t.Fatalf("%q: unmarshal err: %s", tc.got, err)
		}
		if !reflect.DeepEqual(s, tc.want) {
			t.Fatalf("%q: expected\n%#v\nto be equal\n%#v", tc.got, s, tc.want)
		}
	}
}
158 app/vmctl/main.go Normal file
@ -0,0 +1,158 @@
package main

import (
	"fmt"
	"log"
	"os"
	"os/signal"
	"strings"
	"syscall"
	"time"

	"github.com/urfave/cli/v2"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
)

func main() {
	start := time.Now()
	app := &cli.App{
		Name:    "vmctl",
		Usage:   "Victoria metrics command-line tool",
		Version: buildinfo.Version,
		Commands: []*cli.Command{
			{
				Name:  "influx",
				Usage: "Migrate timeseries from InfluxDB",
				Flags: mergeFlags(globalFlags, influxFlags, vmFlags),
				Action: func(c *cli.Context) error {
					fmt.Println("InfluxDB import mode")

					iCfg := influx.Config{
						Addr:      c.String(influxAddr),
						Username:  c.String(influxUser),
						Password:  c.String(influxPassword),
						Database:  c.String(influxDB),
						Retention: c.String(influxRetention),
						Filter: influx.Filter{
							Series:    c.String(influxFilterSeries),
							TimeStart: c.String(influxFilterTimeStart),
							TimeEnd:   c.String(influxFilterTimeEnd),
						},
						ChunkSize: c.Int(influxChunkSize),
					}
					influxClient, err := influx.NewClient(iCfg)
					if err != nil {
						return fmt.Errorf("failed to create influx client: %s", err)
					}

					vmCfg := initConfigVM(c)
					importer, err := vm.NewImporter(vmCfg)
					if err != nil {
						return fmt.Errorf("failed to create VM importer: %s", err)
					}

					processor := newInfluxProcessor(influxClient, importer,
						c.Int(influxConcurrency), c.String(influxMeasurementFieldSeparator))
					return processor.run(c.Bool(globalSilent))
				},
			},
			{
				Name:  "prometheus",
				Usage: "Migrate timeseries from Prometheus",
				Flags: mergeFlags(globalFlags, promFlags, vmFlags),
				Action: func(c *cli.Context) error {
					fmt.Println("Prometheus import mode")

					vmCfg := initConfigVM(c)
					importer, err := vm.NewImporter(vmCfg)
					if err != nil {
						return fmt.Errorf("failed to create VM importer: %s", err)
					}

					promCfg := prometheus.Config{
						Snapshot: c.String(promSnapshot),
						Filter: prometheus.Filter{
							TimeMin:    c.String(promFilterTimeStart),
							TimeMax:    c.String(promFilterTimeEnd),
							Label:      c.String(promFilterLabel),
							LabelValue: c.String(promFilterLabelValue),
						},
					}
					cl, err := prometheus.NewClient(promCfg)
					if err != nil {
						return fmt.Errorf("failed to create prometheus client: %s", err)
					}
					pp := prometheusProcessor{
						cl: cl,
						im: importer,
						cc: c.Int(promConcurrency),
					}
					return pp.run(c.Bool(globalSilent))
				},
			},
			{
				Name:  "vm-native",
				Usage: "Migrate time series between VictoriaMetrics installations via native binary format",
				Flags: vmNativeFlags,
				Action: func(c *cli.Context) error {
					fmt.Println("VictoriaMetrics Native import mode")

					if c.String(vmNativeFilterMatch) == "" {
						return fmt.Errorf("flag %q can't be empty", vmNativeFilterMatch)
					}

					p := vmNativeProcessor{
						filter: filter{
							match:     c.String(vmNativeFilterMatch),
							timeStart: c.String(vmNativeFilterTimeStart),
							timeEnd:   c.String(vmNativeFilterTimeEnd),
						},
						src: &vmNativeClient{
							addr:     strings.Trim(c.String(vmNativeSrcAddr), "/"),
							user:     c.String(vmNativeSrcUser),
							password: c.String(vmNativeSrcPassword),
						},
						dst: &vmNativeClient{
							addr:        strings.Trim(c.String(vmNativeDstAddr), "/"),
							user:        c.String(vmNativeDstUser),
							password:    c.String(vmNativeDstPassword),
							extraLabels: c.StringSlice(vmExtraLabel),
						},
					}
					return p.run()
				},
			},
		},
	}

	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-c
		fmt.Println("\r- Execution cancelled")
		os.Exit(0)
	}()

	err := app.Run(os.Args)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("Total time: %v", time.Since(start))
}

func initConfigVM(c *cli.Context) vm.Config {
	return vm.Config{
		Addr:               c.String(vmAddr),
		User:               c.String(vmUser),
		Password:           c.String(vmPassword),
		Concurrency:        uint8(c.Int(vmConcurrency)),
		Compress:           c.Bool(vmCompress),
		AccountID:          c.String(vmAccountID),
		BatchSize:          c.Int(vmBatchSize),
		SignificantFigures: c.Int(vmSignificantFigures),
		ExtraLabels:        c.StringSlice(vmExtraLabel),
	}
}
131 app/vmctl/prometheus.go Normal file
@ -0,0 +1,131 @@
package main

import (
	"fmt"
	"log"
	"sync"

	"github.com/cheggaaa/pb/v3"
	"github.com/prometheus/prometheus/tsdb"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
)

type prometheusProcessor struct {
	// prometheus client fetches and reads
	// snapshot blocks
	cl *prometheus.Client
	// importer performs import requests
	// for timeseries data returned from
	// snapshot blocks
	im *vm.Importer
	// cc stands for concurrency
	// and defines number of concurrently
	// running snapshot block readers
	cc int
}

func (pp *prometheusProcessor) run(silent bool) error {
	blocks, err := pp.cl.Explore()
	if err != nil {
		return fmt.Errorf("explore failed: %s", err)
	}
	if len(blocks) < 1 {
		return fmt.Errorf("found no blocks to import")
	}
	question := fmt.Sprintf("Found %d blocks to import. Continue?", len(blocks))
	if !silent && !prompt(question) {
		return nil
	}

	bar := pb.StartNew(len(blocks))
	blockReadersCh := make(chan tsdb.BlockReader)
	errCh := make(chan error, pp.cc)
	pp.im.ResetStats()

	var wg sync.WaitGroup
	wg.Add(pp.cc)
	for i := 0; i < pp.cc; i++ {
		go func() {
			defer wg.Done()
			for br := range blockReadersCh {
				if err := pp.do(br); err != nil {
					errCh <- fmt.Errorf("read failed for block %q: %s", br.Meta().ULID, err)
					return
				}
				bar.Increment()
			}
		}()
	}

	// any error breaks the import
	for _, br := range blocks {
		select {
		case promErr := <-errCh:
			close(blockReadersCh)
			return fmt.Errorf("prometheus error: %s", promErr)
		case vmErr := <-pp.im.Errors():
			close(blockReadersCh)
			return fmt.Errorf("import process failed: \n%s", wrapErr(vmErr))
		case blockReadersCh <- br:
		}
	}

	close(blockReadersCh)
	wg.Wait()
	// wait for all buffers to flush
	pp.im.Close()
	// drain import errors channel
	for vmErr := range pp.im.Errors() {
		return fmt.Errorf("import process failed: \n%s", wrapErr(vmErr))
	}
	bar.Finish()
	log.Println("Import finished!")
	log.Print(pp.im.Stats())
	return nil
}

func (pp *prometheusProcessor) do(b tsdb.BlockReader) error {
	ss, err := pp.cl.Read(b)
	if err != nil {
		return fmt.Errorf("failed to read block: %s", err)
	}
	for ss.Next() {
		var name string
		var labels []vm.LabelPair
		series := ss.At()

		for _, label := range series.Labels() {
			if label.Name == "__name__" {
				name = label.Value
				continue
			}
			labels = append(labels, vm.LabelPair{
				Name:  label.Name,
				Value: label.Value,
			})
		}
		if name == "" {
			return fmt.Errorf("failed to find `__name__` label in labelset for block %v", b.Meta().ULID)
		}

		var timestamps []int64
		var values []float64
		it := series.Iterator()
		for it.Next() {
			t, v := it.At()
			timestamps = append(timestamps, t)
			values = append(values, v)
		}
		if err := it.Err(); err != nil {
			return err
		}
		pp.im.Input() <- &vm.TimeSeries{
			Name:       name,
			LabelPairs: labels,
			Timestamps: timestamps,
			Values:     values,
		}
	}
	return ss.Err()
}
147 app/vmctl/prometheus/prometheus.go Normal file
@ -0,0 +1,147 @@
package prometheus

import (
	"fmt"
	"time"

	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
)

// Config contains a list of params needed
// for reading Prometheus snapshots
type Config struct {
	// Path to snapshot directory
	Snapshot string

	Filter Filter
}

// Filter contains configuration for filtering
// the timeseries
type Filter struct {
	TimeMin    string
	TimeMax    string
	Label      string
	LabelValue string
}

// Client is a wrapper over Prometheus tsdb.DBReadOnly
type Client struct {
	*tsdb.DBReadOnly
	filter filter
}

type filter struct {
	min, max   int64
	label      string
	labelValue string
}

func (f filter) inRange(min, max int64) bool {
	// zero filter bounds are treated as unbounded
	fmin, fmax := f.min, f.max
	if fmin == 0 {
		fmin = min
	}
	if fmax == 0 {
		fmax = max
	}
	return min <= fmax && fmin <= max
}

// NewClient creates and validates new Client
// with given Config
func NewClient(cfg Config) (*Client, error) {
	db, err := tsdb.OpenDBReadOnly(cfg.Snapshot, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to open snapshot %q: %s", cfg.Snapshot, err)
	}
	c := &Client{DBReadOnly: db}
	min, max, err := parseTime(cfg.Filter.TimeMin, cfg.Filter.TimeMax)
	if err != nil {
		return nil, fmt.Errorf("failed to parse time in filter: %s", err)
	}
	c.filter = filter{
		min:        min,
		max:        max,
		label:      cfg.Filter.Label,
		labelValue: cfg.Filter.LabelValue,
	}
	return c, nil
}

// Explore fetches all available blocks from a snapshot
// and collects the Meta() data from each block.
// Explore does initial filtering by time-range
// for snapshot blocks but does not take into account
// label filters.
func (c *Client) Explore() ([]tsdb.BlockReader, error) {
	blocks, err := c.Blocks()
	if err != nil {
		return nil, fmt.Errorf("failed to fetch blocks: %s", err)
	}
	s := &Stats{
		Filtered: c.filter.min != 0 || c.filter.max != 0 || c.filter.label != "",
		Blocks:   len(blocks),
	}
	var blocksToImport []tsdb.BlockReader
	for _, block := range blocks {
		meta := block.Meta()
		if !c.filter.inRange(meta.MinTime, meta.MaxTime) {
			s.SkippedBlocks++
			continue
		}
		if s.MinTime == 0 || meta.MinTime < s.MinTime {
			s.MinTime = meta.MinTime
		}
		if s.MaxTime == 0 || meta.MaxTime > s.MaxTime {
			s.MaxTime = meta.MaxTime
		}
		s.Samples += meta.Stats.NumSamples
		s.Series += meta.Stats.NumSeries
		blocksToImport = append(blocksToImport, block)
	}
	fmt.Println(s)
	return blocksToImport, nil
}

// Read reads the given BlockReader according to configured
// time and label filters.
func (c *Client) Read(block tsdb.BlockReader) (storage.SeriesSet, error) {
	minTime, maxTime := block.Meta().MinTime, block.Meta().MaxTime
	if c.filter.min != 0 {
		minTime = c.filter.min
	}
	if c.filter.max != 0 {
		maxTime = c.filter.max
	}
	q, err := tsdb.NewBlockQuerier(block, minTime, maxTime)
	if err != nil {
		return nil, err
	}
	ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, c.filter.label, c.filter.labelValue))
	return ss, nil
}

func parseTime(start, end string) (int64, int64, error) {
	var s, e int64
	if start == "" && end == "" {
		return 0, 0, nil
	}
	if start != "" {
		v, err := time.Parse(time.RFC3339, start)
		if err != nil {
			return 0, 0, fmt.Errorf("failed to parse %q: %s", start, err)
		}
		s = v.UnixNano() / int64(time.Millisecond)
	}
	if end != "" {
		v, err := time.Parse(time.RFC3339, end)
		if err != nil {
			return 0, 0, fmt.Errorf("failed to parse %q: %s", end, err)
		}
		e = v.UnixNano() / int64(time.Millisecond)
	}
	return s, e, nil
}
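A minimal standalone sketch of this reader (the snapshot path is illustrative; prometheusProcessor above shows the concurrent version vmctl actually uses):

	cl, err := prometheus.NewClient(prometheus.Config{
		Snapshot: "/path/to/prometheus/snapshot", // assumed snapshot dir
	})
	if err != nil {
		log.Fatalf("failed to open snapshot: %s", err)
	}
	blocks, err := cl.Explore() // blocks that survive the time-range filter
	if err != nil {
		log.Fatalf("explore failed: %s", err)
	}
	for _, b := range blocks {
		ss, err := cl.Read(b) // storage.SeriesSet narrowed by the label matcher
		if err != nil {
			log.Fatalf("read failed: %s", err)
		}
		for ss.Next() {
			series := ss.At()
			_ = series.Labels() // convert to vm.TimeSeries here
		}
		if err := ss.Err(); err != nil {
			log.Fatalf("series set error: %s", err)
		}
	}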
34 app/vmctl/prometheus/prometheus_test.go Normal file
@ -0,0 +1,34 @@
package prometheus

import (
	"testing"
)

func TestInRange(t *testing.T) {
	testCases := []struct {
		filterMin, filterMax int64
		blockMin, blockMax   int64
		expected             bool
	}{
		{0, 0, 1, 2, true},
		{0, 3, 1, 2, true},
		{0, 3, 4, 5, false},
		{3, 0, 1, 2, false},
		{3, 0, 2, 4, true},
		{3, 10, 1, 2, false},
		{3, 10, 1, 4, true},
		{3, 10, 5, 9, true},
		{3, 10, 9, 12, true},
		{3, 10, 12, 15, false},
	}
	for _, tc := range testCases {
		f := filter{
			min: tc.filterMin,
			max: tc.filterMax,
		}
		got := f.inRange(tc.blockMin, tc.blockMax)
		if got != tc.expected {
			t.Fatalf("got %v; expected %v: %v", got, tc.expected, tc)
		}
	}
}
36 app/vmctl/prometheus/stats.go Normal file
@ -0,0 +1,36 @@
package prometheus

import (
	"fmt"
	"time"
)

// Stats aggregates snapshot block metadata collected during Explore.
type Stats struct {
	Filtered      bool
	MinTime       int64
	MaxTime       int64
	Samples       uint64
	Series        uint64
	Blocks        int
	SkippedBlocks int
}

func (s Stats) String() string {
	str := fmt.Sprintf("Prometheus snapshot stats:\n"+
		" blocks found: %d;\n"+
		" blocks skipped by time filter: %d;\n"+
		" min time: %d (%v);\n"+
		" max time: %d (%v);\n"+
		" samples: %d;\n"+
		" series: %d.",
		s.Blocks, s.SkippedBlocks,
		s.MinTime, time.Unix(s.MinTime/1e3, 0).Format(time.RFC3339),
		s.MaxTime, time.Unix(s.MaxTime/1e3, 0).Format(time.RFC3339),
		s.Samples, s.Series)

	if s.Filtered {
		str += "\n* Stats numbers are based on blocks meta info and don't account for applied filters."
	}

	return str
}
33 app/vmctl/utils.go Normal file
@ -0,0 +1,33 @@
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
)

func prompt(question string) bool {
	reader := bufio.NewReader(os.Stdin)
	fmt.Print(question, " [Y/n] ")
	answer, err := reader.ReadString('\n')
	if err != nil {
		panic(err)
	}
	answer = strings.TrimSpace(strings.ToLower(answer))
	if answer == "" || answer == "yes" || answer == "y" {
		return true
	}
	return false
}

func wrapErr(vmErr *vm.ImportError) error {
	var errTS string
	for _, ts := range vmErr.Batch {
		errTS += fmt.Sprintf("%s for timestamps range %d - %d\n",
			ts.String(), ts.Timestamps[0], ts.Timestamps[len(ts.Timestamps)-1])
	}
	return fmt.Errorf("%s with error: %s", errTS, vmErr.Err)
}
47 app/vmctl/vm/stats.go Normal file
@ -0,0 +1,47 @@
package vm

import (
	"fmt"
	"sync"
	"time"
)

type stats struct {
	sync.Mutex
	samples      uint64
	bytes        uint64
	requests     uint64
	retries      uint64
	startTime    time.Time
	idleDuration time.Duration
}

func (s *stats) String() string {
	s.Lock()
	defer s.Unlock()

	totalImportDuration := time.Since(s.startTime)
	totalImportDurationS := totalImportDuration.Seconds()
	var samplesPerS float64
	if s.samples > 0 && totalImportDurationS > 0 {
		samplesPerS = float64(s.samples) / totalImportDurationS
	}
	bytesPerS := byteCountSI(0)
	if s.bytes > 0 && totalImportDurationS > 0 {
		bytesPerS = byteCountSI(int64(float64(s.bytes) / totalImportDurationS))
	}

	return fmt.Sprintf("VictoriaMetrics importer stats:\n"+
		" idle duration: %v;\n"+
		" time spent while importing: %v;\n"+
		" total samples: %d;\n"+
		" samples/s: %.2f;\n"+
		" total bytes: %s;\n"+
		" bytes/s: %s;\n"+
		" import requests: %d;\n"+
		" import requests retries: %d;",
		s.idleDuration, totalImportDuration,
		s.samples, samplesPerS,
		byteCountSI(int64(s.bytes)), bytesPerS,
		s.requests, s.retries)
}
79 app/vmctl/vm/timeseries.go Normal file
@ -0,0 +1,79 @@
package vm

import (
	"fmt"
	"io"
)

// TimeSeries represents a single series with its labels and samples.
type TimeSeries struct {
	Name       string
	LabelPairs []LabelPair
	Timestamps []int64
	Values     []float64
}

// LabelPair is a single label name/value pair.
type LabelPair struct {
	Name  string
	Value string
}

func (ts TimeSeries) String() string {
	s := ts.Name
	if len(ts.LabelPairs) < 1 {
		return s
	}
	var labels string
	for i, lp := range ts.LabelPairs {
		labels += fmt.Sprintf("%s=%q", lp.Name, lp.Value)
		if i < len(ts.LabelPairs)-1 {
			labels += ","
		}
	}
	return fmt.Sprintf("%s{%s}", s, labels)
}

// cWriter is used to avoid error checking
// while doing Write calls.
// cWriter caches the first error if any
// and discards all sequential write calls
type cWriter struct {
	w   io.Writer
	n   int
	err error
}

func (cw *cWriter) printf(format string, args ...interface{}) {
	if cw.err != nil {
		return
	}
	n, err := fmt.Fprintf(cw.w, format, args...)
	cw.n += n
	cw.err = err
}

// write serializes the series into a single line of the VictoriaMetrics
// JSON import format, e.g.:
// {"metric":{"__name__":"cpu_usage_guest","arch":"x64","hostname":"host_19"},"timestamps":[1567296000000,1567296010000],"values":[1567296000000,66]}
func (ts *TimeSeries) write(w io.Writer) (int, error) {
	pointsCount := len(ts.Timestamps)
	if pointsCount == 0 {
		return 0, nil
	}

	cw := &cWriter{w: w}
	cw.printf(`{"metric":{"__name__":%q`, ts.Name)
	if len(ts.LabelPairs) > 0 {
		for _, lp := range ts.LabelPairs {
			cw.printf(",%q:%q", lp.Name, lp.Value)
		}
	}

	cw.printf(`},"timestamps":[`)
	for i := 0; i < pointsCount-1; i++ {
		cw.printf(`%d,`, ts.Timestamps[i])
	}
	cw.printf(`%d],"values":[`, ts.Timestamps[pointsCount-1])
	for i := 0; i < pointsCount-1; i++ {
		cw.printf(`%v,`, ts.Values[i])
	}
	cw.printf("%v]}\n", ts.Values[pointsCount-1])
	return cw.n, cw.err
}
89 app/vmctl/vm/timeseries_test.go Normal file
@ -0,0 +1,89 @@
package vm

import (
	"bytes"
	"math"
	"strings"
	"testing"
)

func TestTimeSeries_Write(t *testing.T) {
	var testCases = []struct {
		name string
		ts   *TimeSeries
		exp  string
	}{
		{
			name: "one datapoint",
			ts: &TimeSeries{
				Name: "foo",
				LabelPairs: []LabelPair{
					{
						Name:  "key",
						Value: "val",
					},
				},
				Timestamps: []int64{1577877162200},
				Values:     []float64{1},
			},
			exp: `{"metric":{"__name__":"foo","key":"val"},"timestamps":[1577877162200],"values":[1]}`,
		},
		{
			name: "multiple samples",
			ts: &TimeSeries{
				Name: "foo",
				LabelPairs: []LabelPair{
					{
						Name:  "key",
						Value: "val",
					},
				},
				Timestamps: []int64{1577877162200, 15778771622400, 15778771622600},
				Values:     []float64{1, 1.6263, 32.123},
			},
			exp: `{"metric":{"__name__":"foo","key":"val"},"timestamps":[1577877162200,15778771622400,15778771622600],"values":[1,1.6263,32.123]}`,
		},
		{
			name: "no samples",
			ts: &TimeSeries{
				Name: "foo",
				LabelPairs: []LabelPair{
					{
						Name:  "key",
						Value: "val",
					},
				},
			},
			exp: ``,
		},
		{
			name: "inf values",
			ts: &TimeSeries{
				Name: "foo",
				LabelPairs: []LabelPair{
					{
						Name:  "key",
						Value: "val",
					},
				},
				Timestamps: []int64{1577877162200, 1577877162200, 1577877162200},
				Values:     []float64{0, math.Inf(-1), math.Inf(1)},
			},
			exp: `{"metric":{"__name__":"foo","key":"val"},"timestamps":[1577877162200,1577877162200,1577877162200],"values":[0,-Inf,+Inf]}`,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			b := &bytes.Buffer{}
			_, err := tc.ts.write(b)
			if err != nil {
				t.Error(err)
			}
			got := strings.TrimSpace(b.String())
			if got != tc.exp {
				t.Fatalf("\ngot: %q\nwant: %q", got, tc.exp)
			}
		})
	}
}
369
app/vmctl/vm/vm.go
Normal file
369
app/vmctl/vm/vm.go
Normal file
|
@ -0,0 +1,369 @@
|
||||||
|
package vm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"compress/gzip"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Config contains list of params to configure
|
||||||
|
// the Importer
|
||||||
|
type Config struct {
|
||||||
|
// VictoriaMetrics address to perform import requests
|
||||||
|
// --httpListenAddr value for single node version
|
||||||
|
// --httpListenAddr value of VMSelect component for cluster version
|
||||||
|
Addr string
|
||||||
|
// Concurrency defines number of worker
|
||||||
|
// performing the import requests concurrently
|
||||||
|
Concurrency uint8
|
||||||
|
// Whether to apply gzip compression
|
||||||
|
Compress bool
|
||||||
|
// AccountID for cluster version.
|
||||||
|
// Empty value assumes it is a single node version
|
||||||
|
AccountID string
|
||||||
|
// BatchSize defines how many samples
|
||||||
|
// importer collects before sending the import request
|
||||||
|
BatchSize int
|
||||||
|
// User name for basic auth
|
||||||
|
User string
|
||||||
|
// Password for basic auth
|
||||||
|
Password string
|
||||||
|
// SignificantFigures defines the number of significant figures to leave
|
||||||
|
// in metric values before importing.
|
||||||
|
// Zero value saves all the significant decimal places
|
||||||
|
SignificantFigures int
|
||||||
|
// ExtraLabels that will be added to all imported series. Must be in label=value format.
|
||||||
|
ExtraLabels []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Importer performs insertion of timeseries
|
||||||
|
// via VictoriaMetrics import protocol
|
||||||
|
// see https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master#how-to-import-time-series-data
|
||||||
|
type Importer struct {
|
||||||
|
addr string
|
||||||
|
importPath string
|
||||||
|
compress bool
|
||||||
|
user string
|
||||||
|
password string
|
||||||
|
|
||||||
|
close chan struct{}
|
||||||
|
input chan *TimeSeries
|
||||||
|
errors chan *ImportError
|
||||||
|
|
||||||
|
wg sync.WaitGroup
|
||||||
|
once sync.Once
|
||||||
|
|
||||||
|
s *stats
|
||||||
|
}
|
||||||
|
|
||||||
|
func (im *Importer) ResetStats() {
|
||||||
|
im.s = &stats{
|
||||||
|
startTime: time.Now(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (im *Importer) Stats() string {
|
||||||
|
return im.s.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddExtraLabelsToImportPath - adds extra labels query params to given url path.
|
||||||
|
func AddExtraLabelsToImportPath(path string, extraLabels []string) (string, error) {
|
||||||
|
dst := path
|
||||||
|
separator := "?"
|
||||||
|
for _, extraLabel := range extraLabels {
|
||||||
|
if !strings.Contains(extraLabel, "=") {
|
||||||
|
return path, fmt.Errorf("bad format for extra_label flag, it must be `key=value`, got: %q", extraLabel)
|
||||||
|
}
|
||||||
|
if strings.Contains(dst, "?") {
|
||||||
|
separator = "&"
|
||||||
|
}
|
||||||
|
dst += fmt.Sprintf("%sextra_label=%s", separator, extraLabel)
|
||||||
|
}
|
||||||
|
return dst, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewImporter(cfg Config) (*Importer, error) {
|
||||||
|
if cfg.Concurrency < 1 {
|
||||||
|
return nil, fmt.Errorf("concurrency can't be lower than 1")
|
||||||
|
}
|
||||||
|
|
||||||
|
addr := strings.TrimRight(cfg.Addr, "/")
|
||||||
|
// if single version
|
||||||
|
// see https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master#how-to-import-time-series-data
|
||||||
|
importPath := addr + "/api/v1/import"
|
||||||
|
if cfg.AccountID != "" {
|
||||||
|
// if cluster version
|
||||||
|
// see https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster#url-format
|
||||||
|
importPath = fmt.Sprintf("%s/insert/%s/prometheus/api/v1/import", addr, cfg.AccountID)
|
||||||
|
}
|
||||||
|
importPath, err := AddExtraLabelsToImportPath(importPath, cfg.ExtraLabels)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
im := &Importer{
|
||||||
|
addr: addr,
|
||||||
|
importPath: importPath,
|
||||||
|
compress: cfg.Compress,
|
||||||
|
user: cfg.User,
|
||||||
|
password: cfg.Password,
|
||||||
|
close: make(chan struct{}),
|
||||||
|
input: make(chan *TimeSeries, cfg.Concurrency*4),
|
||||||
|
errors: make(chan *ImportError, cfg.Concurrency),
|
||||||
|
}
|
||||||
|
if err := im.Ping(); err != nil {
|
||||||
|
return nil, fmt.Errorf("ping to %q failed: %s", addr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.BatchSize < 1 {
|
||||||
|
cfg.BatchSize = 1e5
|
||||||
|
}
|
||||||
|
|
||||||
|
im.wg.Add(int(cfg.Concurrency))
|
||||||
|
for i := 0; i < int(cfg.Concurrency); i++ {
|
||||||
|
go func() {
|
||||||
|
defer im.wg.Done()
|
||||||
|
im.startWorker(cfg.BatchSize, cfg.SignificantFigures)
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
im.ResetStats()
|
||||||
|
return im, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ImportError is type of error generated
|
||||||
|
// in case of unsuccessful import request
|
||||||
|
type ImportError struct {
|
||||||
|
// The batch of timeseries that failed
|
||||||
|
Batch []*TimeSeries
|
||||||
|
// The error that appeared during insert
|
||||||
|
Err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Errors returns a channel for receiving
|
||||||
|
// import errors if any
|
||||||
|
func (im *Importer) Errors() chan *ImportError { return im.errors }
|
||||||
|
|
||||||
|
// Input returns a channel for sending timeseries
|
||||||
|
// that need to be imported
|
||||||
|
func (im *Importer) Input() chan<- *TimeSeries { return im.input }
|
||||||
|
|
||||||
|
// Close sends signal to all goroutines to exit
|
||||||
|
// and waits until they are finished
|
||||||
|
func (im *Importer) Close() {
|
||||||
|
im.once.Do(func() {
|
||||||
|
close(im.close)
|
||||||
|
im.wg.Wait()
|
||||||
|
close(im.errors)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (im *Importer) startWorker(batchSize, significantFigures int) {
|
||||||
|
var batch []*TimeSeries
|
||||||
|
var dataPoints int
|
||||||
|
var waitForBatch time.Time
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-im.close:
|
||||||
|
if err := im.Import(batch); err != nil {
|
||||||
|
im.errors <- &ImportError{
|
||||||
|
Batch: batch,
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
case ts := <-im.input:
|
||||||
|
// init waitForBatch when first
|
||||||
|
// value was received
|
||||||
|
if waitForBatch.IsZero() {
|
||||||
|
waitForBatch = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
if significantFigures > 0 {
|
||||||
|
// Round values according to significantFigures
|
||||||
|
for i, v := range ts.Values {
|
||||||
|
ts.Values[i] = decimal.Round(v, significantFigures)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
batch = append(batch, ts)
|
||||||
|
dataPoints += len(ts.Values)
|
||||||
|
if dataPoints < batchSize {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
im.s.Lock()
|
||||||
|
im.s.idleDuration += time.Since(waitForBatch)
|
||||||
|
im.s.Unlock()
|
||||||
|
|
||||||
|
if err := im.flush(batch); err != nil {
|
||||||
|
im.errors <- &ImportError{
|
||||||
|
Batch: batch,
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
|
// make a new batch, since old one was referenced as err
|
||||||
|
batch = make([]*TimeSeries, len(batch))
|
||||||
|
}
|
||||||
|
batch = batch[:0]
|
||||||
|
dataPoints = 0
|
||||||
|
waitForBatch = time.Now()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// TODO: make configurable
|
||||||
|
backoffRetries = 5
|
||||||
|
backoffFactor = 1.7
|
||||||
|
backoffMinDuration = time.Second
|
||||||
|
)
|
||||||
|
|
||||||
|
func (im *Importer) flush(b []*TimeSeries) error {
|
||||||
|
var err error
|
||||||
|
for i := 0; i < backoffRetries; i++ {
|
||||||
|
err = im.Import(b)
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if errors.Is(err, ErrBadRequest) {
|
||||||
|
return err // fail fast if not recoverable
|
||||||
|
}
|
||||||
|
im.s.Lock()
|
||||||
|
im.s.retries++
|
||||||
|
im.s.Unlock()
|
||||||
|
backoff := float64(backoffMinDuration) * math.Pow(backoffFactor, float64(i))
|
||||||
|
time.Sleep(time.Duration(backoff))
|
||||||
|
}
|
||||||
|
return fmt.Errorf("import failed with %d retries: %s", backoffRetries, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (im *Importer) Ping() error {
|
||||||
|
url := fmt.Sprintf("%s/health", im.addr)
|
||||||
|
req, err := http.NewRequest("GET", url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("cannot create request to %q: %s", im.addr, err)
|
||||||
|
}
|
||||||
|
if im.user != "" {
|
||||||
|
req.SetBasicAuth(im.user, im.password)
|
||||||
|
}
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return fmt.Errorf("bad status code: %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}

func (im *Importer) Import(tsBatch []*TimeSeries) error {
	if len(tsBatch) < 1 {
		return nil
	}

	pr, pw := io.Pipe()
	req, err := http.NewRequest("POST", im.importPath, pr)
	if err != nil {
		return fmt.Errorf("cannot create request to %q: %s", im.addr, err)
	}
	if im.user != "" {
		req.SetBasicAuth(im.user, im.password)
	}
	if im.compress {
		req.Header.Set("Content-Encoding", "gzip")
	}

	errCh := make(chan error)
	go func() {
		errCh <- do(req)
		close(errCh)
	}()

	w := io.Writer(pw)
	if im.compress {
		zw, err := gzip.NewWriterLevel(pw, 1)
		if err != nil {
			return fmt.Errorf("unexpected error when creating gzip writer: %s", err)
		}
		w = zw
	}
	bw := bufio.NewWriterSize(w, 16*1024)

	var totalSamples, totalBytes int
	for _, ts := range tsBatch {
		n, err := ts.write(bw)
		if err != nil {
			return fmt.Errorf("write err: %w", err)
		}
		totalBytes += n
		totalSamples += len(ts.Values)
	}
	if err := bw.Flush(); err != nil {
		return err
	}
	if im.compress {
		err := w.(*gzip.Writer).Close()
		if err != nil {
			return err
		}
	}
	if err := pw.Close(); err != nil {
		return err
	}

	requestErr := <-errCh
	if requestErr != nil {
		return fmt.Errorf("import request error for %q: %w", im.addr, requestErr)
	}

	im.s.Lock()
	im.s.bytes += uint64(totalBytes)
	im.s.samples += uint64(totalSamples)
	im.s.requests++
	im.s.Unlock()

	return nil
}

var ErrBadRequest = errors.New("bad request")

func do(req *http.Request) error {
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("unexpected error when performing request: %s", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return fmt.Errorf("failed to read response body for status code %d: %s", resp.StatusCode, err)
		}
		if resp.StatusCode == http.StatusBadRequest {
			return fmt.Errorf("%w: unexpected response code %d: %s", ErrBadRequest, resp.StatusCode, string(body))
		}
		return fmt.Errorf("unexpected response code %d: %s", resp.StatusCode, string(body))
	}
	return nil
}

func byteCountSI(b int64) string {
	const unit = 1000
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div, exp := int64(unit), 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f %cB",
		float64(b)/float64(div), "kMGTPE"[exp])
}
69  app/vmctl/vm/vm_test.go  Normal file
@@ -0,0 +1,69 @@
package vm

import "testing"

func TestAddExtraLabelsToImportPath(t *testing.T) {
	type args struct {
		path        string
		extraLabels []string
	}
	tests := []struct {
		name    string
		args    args
		want    string
		wantErr bool
	}{
		{
			name: "ok w/o extra labels",
			args: args{
				path: "/api/v1/import",
			},
			want: "/api/v1/import",
		},
		{
			name: "ok one extra label",
			args: args{
				path:        "/api/v1/import",
				extraLabels: []string{"instance=host-1"},
			},
			want: "/api/v1/import?extra_label=instance=host-1",
		},
		{
			name: "ok two extra labels",
			args: args{
				path:        "/api/v1/import",
				extraLabels: []string{"instance=host-2", "job=vmagent"},
			},
			want: "/api/v1/import?extra_label=instance=host-2&extra_label=job=vmagent",
		},
		{
			name: "ok two extra with exist param",
			args: args{
				path:        "/api/v1/import?timeout=50",
				extraLabels: []string{"instance=host-2", "job=vmagent"},
			},
			want: "/api/v1/import?timeout=50&extra_label=instance=host-2&extra_label=job=vmagent",
		},
		{
			name: "bad incorrect format for extra label",
			args: args{
				path:        "/api/v1/import",
				extraLabels: []string{"label=value", "bad_label_wo_value"},
			},
			want:    "/api/v1/import",
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := AddExtraLabelsToImportPath(tt.args.path, tt.args.extraLabels)
			if (err != nil) != tt.wantErr {
				t.Errorf("AddExtraLabelsToImportPath() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("AddExtraLabelsToImportPath() got = %v, want %v", got, tt.want)
			}
		})
	}
}
141  app/vmctl/vm_native.go  Normal file
@@ -0,0 +1,141 @@
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"

	"github.com/cheggaaa/pb/v3"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
)

type vmNativeProcessor struct {
	filter filter

	dst *vmNativeClient
	src *vmNativeClient
}

type vmNativeClient struct {
	addr        string
	user        string
	password    string
	extraLabels []string
}

type filter struct {
	match     string
	timeStart string
	timeEnd   string
}

func (f filter) String() string {
	s := fmt.Sprintf("\n\tfilter: match[]=%s", f.match)
	if f.timeStart != "" {
		s += fmt.Sprintf("\n\tstart: %s", f.timeStart)
	}
	if f.timeEnd != "" {
		s += fmt.Sprintf("\n\tend: %s", f.timeEnd)
	}
	return s
}

const (
	nativeExportAddr = "api/v1/export/native"
	nativeImportAddr = "api/v1/import/native"

	barTpl = `Total: {{counters . }} {{ cycle . "↖" "↗" "↘" "↙" }} Speed: {{speed . }} {{string . "suffix"}}`
)

func (p *vmNativeProcessor) run() error {
	pr, pw := io.Pipe()

	fmt.Printf("Initing export pipe from %q with filters: %s\n", p.src.addr, p.filter)
	exportReader, err := p.exportPipe()
	if err != nil {
		return fmt.Errorf("failed to init export pipe: %s", err)
	}

	sync := make(chan struct{})
	nativeImportAddr, err := vm.AddExtraLabelsToImportPath(nativeImportAddr, p.dst.extraLabels)
	if err != nil {
		return err
	}

	go func() {
		defer func() { close(sync) }()
		u := fmt.Sprintf("%s/%s", p.dst.addr, nativeImportAddr)
		req, err := http.NewRequest("POST", u, pr)
		if err != nil {
			log.Fatalf("cannot create import request to %q: %s", p.dst.addr, err)
		}
		importResp, err := p.dst.do(req, http.StatusNoContent)
		if err != nil {
			log.Fatalf("import request failed: %s", err)
		}
		importResp.Body.Close()
	}()

	fmt.Printf("Initing import process to %q:\n", p.dst.addr)
	bar := pb.ProgressBarTemplate(barTpl).Start64(0)
	barReader := bar.NewProxyReader(exportReader)

	_, err = io.Copy(pw, barReader)
	if err != nil {
		return fmt.Errorf("failed to write into %q: %s", p.dst.addr, err)
	}
	if err := pw.Close(); err != nil {
		return err
	}
	<-sync

	bar.Finish()
	return nil
}

func (p *vmNativeProcessor) exportPipe() (io.ReadCloser, error) {
	u := fmt.Sprintf("%s/%s", p.src.addr, nativeExportAddr)
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, fmt.Errorf("cannot create request to %q: %s", p.src.addr, err)
	}

	params := req.URL.Query()
	params.Set("match[]", p.filter.match)
	if p.filter.timeStart != "" {
		params.Set("start", p.filter.timeStart)
	}
	if p.filter.timeEnd != "" {
		params.Set("end", p.filter.timeEnd)
	}
	req.URL.RawQuery = params.Encode()

	// disable compression since it is meaningless for native format
	req.Header.Set("Accept-Encoding", "identity")
	resp, err := p.src.do(req, http.StatusOK)
	if err != nil {
		return nil, fmt.Errorf("export request failed: %s", err)
	}
	return resp.Body, nil
}

func (c *vmNativeClient) do(req *http.Request, expSC int) (*http.Response, error) {
	if c.user != "" {
		req.SetBasicAuth(c.user, c.password)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("unexpected error when performing request: %s", err)
	}

	if resp.StatusCode != expSC {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return nil, fmt.Errorf("failed to read response body for status code %d: %s", resp.StatusCode, err)
		}
		return nil, fmt.Errorf("unexpected response code %d: %s", resp.StatusCode, string(body))
	}
	return resp, err
}
@@ -2,6 +2,7 @@

# tip

* FEATURE: added [vmctl tool](https://victoriametrics.github.io/vmctl.html) to VictoriaMetrics release process. Now it is packaged in `vmutils-*.tar.gz` archive on [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases). Source code for `vmctl` tool has been moved from [github.com/VictoriaMetrics/vmctl](https://github.com/VictoriaMetrics/vmctl) to [github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmctl).
* FEATURE: added `-loggerTimezone` command-line flag for adjusting time zone for timestamps in log messages. By default UTC is used.
* FEATURE: added `-search.maxStepForPointsAdjustment` command-line flag, which can be used for disabling adjustment for points returned by `/api/v1/query_range` handler if such points have timestamps closer than `-search.latencyOffset` to the current time. Such points may contain incomplete data, so they are substituted by the previous values for `step` query args smaller than one minute by default.
* FEATURE: vmalert: added `-datasource.queryStep` command-line flag for passing optional `step` query arg to `/api/v1/query` endpoint. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1025
@@ -154,6 +154,7 @@ Alphabetically sorted links to case studies:
* [Tuning](#tuning)
* [Monitoring](#monitoring)
* [Troubleshooting](#troubleshooting)
* [Data migration](#data-migration)
* [Backfilling](#backfilling)
* [Data updates](#data-updates)
* [Replication](#replication)
@@ -1353,6 +1354,17 @@ See the example of alerting rules for VM components [here](https://github.com/Vi
* VictoriaMetrics ignores `NaN` values during data ingestion.


## Data migration

Use [vmctl](https://victoriametrics.github.io/vmctl.html) for data migration. It supports the following data migration types:

* From Prometheus to VictoriaMetrics
* From InfluxDB to VictoriaMetrics
* From VictoriaMetrics to VictoriaMetrics

See [vmctl docs](https://victoriametrics.github.io/vmctl.html) for more details.


## Backfilling

VictoriaMetrics accepts historical data in arbitrary order of time via [any supported ingestion method](#how-to-import-time-series-data).
@@ -1420,7 +1432,6 @@ The collected profiles may be analyzed with [go tool pprof](https://github.com/g
* [Helm charts for single-node and cluster versions of VictoriaMetrics](https://github.com/VictoriaMetrics/helm-charts).
* [Kubernetes operator for VictoriaMetrics](https://github.com/VictoriaMetrics/operator).
* [vmctl tool for data migration to VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl).
* [netdata](https://github.com/netdata/netdata) can push data into VictoriaMetrics via `Prometheus remote_write API`.
  See [these docs](https://github.com/netdata/netdata#integrations).
* [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi) can use VictoriaMetrics as time series backend.
427  docs/vmctl.md  Normal file
@@ -0,0 +1,427 @@
# vmctl - VictoriaMetrics command-line tool

Features:
- [x] Prometheus: migrate data from Prometheus to VictoriaMetrics using snapshot API
- [x] Thanos: migrate data from Thanos to VictoriaMetrics
- [ ] ~~Prometheus: migrate data from Prometheus to VictoriaMetrics by query~~(discarded)
- [x] InfluxDB: migrate data from InfluxDB to VictoriaMetrics
- [ ] Storage Management: data re-balancing between nodes

# Table of contents

* [Articles](#articles)
* [How to build](#how-to-build)
* [Migrating data from InfluxDB 1.x](#migrating-data-from-influxdb-1x)
  * [Data mapping](#data-mapping)
  * [Configuration](#configuration)
  * [Filtering](#filtering)
* [Migrating data from InfluxDB 2.x](#migrating-data-from-influxdb-2x)
* [Migrating data from Prometheus](#migrating-data-from-prometheus)
  * [Data mapping](#data-mapping-1)
  * [Configuration](#configuration-1)
  * [Filtering](#filtering-1)
* [Migrating data from Thanos](#migrating-data-from-thanos)
  * [Current data](#current-data)
  * [Historical data](#historical-data)
* [Migrating data from VictoriaMetrics](#migrating-data-from-victoriametrics)
  * [Native protocol](#native-protocol)
* [Tuning](#tuning)
  * [Influx mode](#influx-mode)
  * [Prometheus mode](#prometheus-mode)
  * [VictoriaMetrics importer](#victoriametrics-importer)
  * [Importer stats](#importer-stats)
  * [Silent mode](#silent-mode)
  * [Significant figures](#significant-figures)
  * [Adding extra labels](#adding-extra-labels)


## Articles

* [How to migrate data from Prometheus](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-d44a6728f043)
* [How to migrate data from Prometheus. Filtering and modifying time series](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-filtering-and-modifying-time-series-6d40cea4bf21)

## How to build

1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.12.
2. Run `make build` from the root folder of the repository.
   It builds the `vmctl` binary and puts it into the `bin` folder.

## Migrating data from InfluxDB (1.x)

`vmctl` supports the `influx` mode for migrating data from InfluxDB to the VictoriaMetrics time-series database.

See `./vmctl influx --help` for details and the full list of flags.

To use the migration tool, specify the InfluxDB address `--influx-addr`, the database `--influx-database` and the VictoriaMetrics address `--vm-addr`.
For single-node VM the `--vm-addr` flag is usually equal to `--httpListenAddr`, and for the cluster version
it is equal to the `--httpListenAddr` flag of the VMInsert component. Please note that vmctl performs an initial readiness check for the given address
by probing the `/health` endpoint. For the cluster version it is additionally required to specify the `--vm-account-id` flag.
See more details for the cluster version [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).

As soon as the required flags are provided and all endpoints are accessible, `vmctl` starts the InfluxDB scheme exploration.
Basically, it just fetches all fields and timeseries from the provided database and builds a registry of all available timeseries.
Then `vmctl` sends a fetch request for each timeseries to InfluxDB one by one and passes the results to the VM importer.
The VM importer accumulates the received samples in batches and sends import requests to VM.

An example of the importing process for a local installation of InfluxDB (`http://localhost:8086`)
and single-node VictoriaMetrics (`http://localhost:8428`):
```
./vmctl influx --influx-database benchmark
InfluxDB import mode
2020/01/18 20:47:11 Exploring scheme for database "benchmark"
2020/01/18 20:47:11 fetching fields: command: "show field keys"; database: "benchmark"; retention: "autogen"
2020/01/18 20:47:11 found 10 fields
2020/01/18 20:47:11 fetching series: command: "show series "; database: "benchmark"; retention: "autogen"
Found 40000 timeseries to import. Continue? [Y/n] y
40000 / 40000 [-----------------------------------------------------------------------------------------------------------------------------------------------] 100.00% 21 p/s
2020/01/18 21:19:00 Import finished!
2020/01/18 21:19:00 VictoriaMetrics importer stats:
  idle duration: 13m51.461434876s;
  time spent while importing: 17m56.923899847s;
  total samples: 345600000;
  samples/s: 320914.04;
  total bytes: 5.9 GB;
  bytes/s: 5.4 MB;
  import requests: 40001;
2020/01/18 21:19:00 Total time: 31m48.467044016s
```

### Data mapping

Vmctl maps Influx data the same way as VictoriaMetrics does, by using the following rules:

* The `influx-database` arg is mapped into the `db` label value unless a `db` tag exists in the Influx line.
* Field names are mapped to time series names prefixed with the `{measurement}{separator}` value,
  where `{separator}` equals `_` by default.
  It can be changed with the `--influx-measurement-field-separator` command-line flag.
* Field values are mapped to time series values.
* Tags are mapped to Prometheus labels format as-is.

For example, the following Influx line:
```
foo,tag1=value1,tag2=value2 field1=12,field2=40
```

is converted into the following Prometheus format data points:
```
foo_field1{tag1="value1", tag2="value2"} 12
foo_field2{tag1="value1", tag2="value2"} 40
```
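
A minimal Go sketch of this naming rule (illustrative only; the helper name and hard-coded defaults below are assumptions, not the actual converter used by `vmctl`):
```go
package main

import "fmt"

// influxMetricName builds the resulting time series name from an Influx
// measurement and field name, joined by the configurable separator
// (the default for --influx-measurement-field-separator is "_").
func influxMetricName(measurement, field, separator string) string {
	return measurement + separator + field
}

func main() {
	// The field "field1" of measurement "foo" becomes the series "foo_field1".
	fmt.Println(influxMetricName("foo", "field1", "_")) // foo_field1
}
```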

### Configuration

The configuration flags should contain self-explanatory descriptions.

### Filtering

The filtering consists of two parts: by timeseries and by time.
The first step is to select all available timeseries
for the given database and retention. The user may specify an additional filtering
condition via the `--influx-filter-series` flag. For example:
```
./vmctl influx --influx-database benchmark \
  --influx-filter-series "on benchmark from cpu where hostname='host_1703'"
InfluxDB import mode
2020/01/26 14:23:29 Exploring scheme for database "benchmark"
2020/01/26 14:23:29 fetching fields: command: "show field keys"; database: "benchmark"; retention: "autogen"
2020/01/26 14:23:29 found 12 fields
2020/01/26 14:23:29 fetching series: command: "show series on benchmark from cpu where hostname='host_1703'"; database: "benchmark"; retention: "autogen"
Found 10 timeseries to import. Continue? [Y/n]
```
The resulting timeseries select query would be the following:
`fetching series: command: "show series on benchmark from cpu where hostname='host_1703'"; database: "benchmark"; retention: "autogen"`

The second step of filtering is the time filter, which applies when fetching the datapoints from InfluxDB.
Time filtering may be configured with two flags:
* `--influx-filter-time-start`
* `--influx-filter-time-end`
Here's an example of importing timeseries for one day only:
`./vmctl influx --influx-database benchmark --influx-filter-series "where hostname='host_1703'" --influx-filter-time-start "2020-01-01T10:07:00Z" --influx-filter-time-end "2020-01-01T15:07:00Z"`

Please see more about time filtering [here](https://docs.influxdata.com/influxdb/v1.7/query_language/schema_exploration#filter-meta-queries-by-time).

## Migrating data from InfluxDB (2.x)

Migrating data from InfluxDB v2.x is not supported yet ([#32](https://github.com/VictoriaMetrics/vmctl/issues/32)).
You may find a third-party solution for this useful: https://github.com/jonppe/influx_to_victoriametrics.


## Migrating data from Prometheus

`vmctl` supports the `prometheus` mode for migrating data from Prometheus to the VictoriaMetrics time-series database.
Migration is based on reading a Prometheus snapshot, which is basically a set of hard-links to the Prometheus data files.

See `./vmctl prometheus --help` for details and the full list of flags.

To use the migration tool, specify the path to the Prometheus snapshot `--prom-snapshot` and the VictoriaMetrics address `--vm-addr`.
More about Prometheus snapshots may be found [here](https://www.robustperception.io/taking-snapshots-of-prometheus-data).
For single-node VM the `--vm-addr` flag is usually equal to `--httpListenAddr`, and for the cluster version
it is equal to the `--httpListenAddr` flag of the VMInsert component. Please note that vmctl performs an initial readiness check for the given address
by probing the `/health` endpoint. For the cluster version it is additionally required to specify the `--vm-account-id` flag.
See more details for the cluster version [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).

As soon as the required flags are provided and all endpoints are accessible, `vmctl` starts the Prometheus snapshot exploration.
Basically, it just fetches all available blocks in the provided snapshot and reads their metadata. It also does initial filtering by time
if the `--prom-filter-time-start` or `--prom-filter-time-end` flags were set. The exploration procedure prints some stats from the read blocks.
Please note that these stats do not take timeseries or sample filtering into account; that filtering is applied during the import process.

The importing process takes the snapshot blocks revealed by the Explore procedure and processes them one by one,
accumulating timeseries and samples. Please note that `vmctl` relies on the responses received at this stage,
so ensure that the Explore queries are executed without errors or limits. Please see this
[issue](https://github.com/VictoriaMetrics/vmctl/issues/30) for details.
The data is processed in chunks and then sent to VM.

An example of the importing process for a local installation of Prometheus
and single-node VictoriaMetrics (`http://localhost:8428`):
```
./vmctl prometheus --prom-snapshot=/path/to/snapshot \
  --vm-concurrency=1 \
  --vm-batch-size=200000 \
  --prom-concurrency=3
Prometheus import mode
Prometheus snapshot stats:
  blocks found: 14;
  blocks skipped: 0;
  min time: 1581288163058 (2020-02-09T22:42:43Z);
  max time: 1582409128139 (2020-02-22T22:05:28Z);
  samples: 32549106;
  series: 27289.
Found 14 blocks to import. Continue? [Y/n] y
14 / 14 [-------------------------------------------------------------------------------------------] 100.00% 0 p/s
2020/02/23 15:50:03 Import finished!
2020/02/23 15:50:03 VictoriaMetrics importer stats:
  idle duration: 6.152953029s;
  time spent while importing: 44.908522491s;
  total samples: 32549106;
  samples/s: 724786.84;
  total bytes: 669.1 MB;
  bytes/s: 14.9 MB;
  import requests: 323;
  import requests retries: 0;
2020/02/23 15:50:03 Total time: 51.077451066s
```

### Data mapping

VictoriaMetrics has a very similar data model to Prometheus and supports [RemoteWrite integration](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
So no data changes will be applied.

### Configuration

The configuration flags should contain self-explanatory descriptions.

### Filtering

The filtering consists of two parts: by timeseries and by time.

Filtering by time may be configured via the `--prom-filter-time-start` and `--prom-filter-time-end` flags
in RFC3339 format. This filter is applied twice: to drop blocks out of range and to filter timeseries in blocks with an
overlapping time range.

Example of applying the time filter:
```
./vmctl prometheus --prom-snapshot=/path/to/snapshot \
  --prom-filter-time-start=2020-02-07T00:07:01Z \
  --prom-filter-time-end=2020-02-11T00:07:01Z
Prometheus import mode
Prometheus snapshot stats:
  blocks found: 2;
  blocks skipped: 12;
  min time: 1581288163058 (2020-02-09T22:42:43Z);
  max time: 1581328800000 (2020-02-10T10:00:00Z);
  samples: 1657698;
  series: 3930.
Found 2 blocks to import. Continue? [Y/n] y
```

Please note that the total number of blocks in the provided snapshot is 14, but only 2 of them were in the provided
time range, so the other 12 blocks were marked as `skipped`. The number of samples and series is not taken into account,
since this is a heavy operation and will be done during the import process.


Filtering by timeseries is configured with the following flags:
* `--prom-filter-label` - the label name, e.g. `__name__` or `instance`;
* `--prom-filter-label-value` - the regular expression to filter the label value. By default it matches all (`.*`).

For example:
```
./vmctl prometheus --prom-snapshot=/path/to/snapshot \
  --prom-filter-label="__name__" \
  --prom-filter-label-value="promhttp.*" \
  --prom-filter-time-start=2020-02-07T00:07:01Z \
  --prom-filter-time-end=2020-02-11T00:07:01Z
Prometheus import mode
Prometheus snapshot stats:
  blocks found: 2;
  blocks skipped: 12;
  min time: 1581288163058 (2020-02-09T22:42:43Z);
  max time: 1581328800000 (2020-02-10T10:00:00Z);
  samples: 1657698;
  series: 3930.
Found 2 blocks to import. Continue? [Y/n] y
14 / 14 [------------------------------------------------------------------------------------------------------------------------------------------------------] 100.00% ? p/s
2020/02/23 15:51:07 Import finished!
2020/02/23 15:51:07 VictoriaMetrics importer stats:
  idle duration: 0s;
  time spent while importing: 37.415461ms;
  total samples: 10128;
  samples/s: 270690.24;
  total bytes: 195.2 kB;
  bytes/s: 5.2 MB;
  import requests: 2;
  import requests retries: 0;
2020/02/23 15:51:07 Total time: 7.153158218s
```

## Migrating data from Thanos

Thanos uses the same storage engine as Prometheus and the data layout on-disk should be the same. That means
`vmctl` in `prometheus` mode may be used for Thanos historical data migration as well.
These instructions may vary based on the details of your Thanos configuration.
Please read carefully and verify as you go. We assume you're using Thanos Sidecar on your Prometheus pods,
and that you have a separate Thanos Store installation.

### Current data

1. For now, keep your Thanos Sidecar and Thanos-related Prometheus configuration, but add this to also stream
   metrics to VictoriaMetrics:
    ```
    remote_write:
    - url: http://victoria-metrics:8428/api/v1/write
    ```
2. Make sure VM is running, of course. Now check the logs to make sure that Prometheus is sending and VM is receiving.
   In Prometheus, make sure there are no errors. On the VM side, you should see messages like this:
    ```
    2020-04-27T18:38:46.474Z info VictoriaMetrics/lib/storage/partition.go:207 creating a partition "2020_04" with smallPartsPath="/victoria-metrics-data/data/small/2020_04", bigPartsPath="/victoria-metrics-data/data/big/2020_04"
    2020-04-27T18:38:46.506Z info VictoriaMetrics/lib/storage/partition.go:222 partition "2020_04" has been created
    ```
3. Now just wait. Within two hours, Prometheus should finish its current data file and hand it off to Thanos Store for long-term
   storage.

### Historical data

Let's assume your data is stored on S3 served by minio. You first need to copy that out to a local filesystem,
then import it into VM using `vmctl` in `prometheus` mode.
1. Copy data from minio.
   1. Run the `minio/mc` Docker container.
   1. `mc config host add minio http://minio:9000 accessKey secretKey`, substituting appropriate values for the last 3 items.
   1. `mc cp -r minio/prometheus thanos-data`
1. Import using `vmctl`.
   1. Follow the [instructions](#how-to-build) to compile `vmctl` on your machine.
   1. Use [prometheus](#migrating-data-from-prometheus) mode to import data:
      ```
      vmctl prometheus --prom-snapshot thanos-data --vm-addr http://victoria-metrics:8428
      ```
## Migrating data from VictoriaMetrics

### Native protocol

The [native binary protocol](https://victoriametrics.github.io/#how-to-export-data-in-native-format)
was introduced in the [1.42.0 release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.42.0)
and provides the most efficient way to migrate data between VM instances: single to single, cluster to cluster,
single to cluster and vice versa. Please note that both instances (source and destination) should be of v1.42.0
or higher.

See `./vmctl vm-native --help` for details and the full list of flags.

In this mode `vmctl` acts as a proxy between two VM instances, where time series filtering is done by the "source" (`src`)
and processing is done by the "destination" (`dst`). Because of that, `vmctl` doesn't actually know how much data will be
processed and can't show a progress bar. It will show the current processing speed and the total number of processed bytes:

```
./vmctl vm-native --vm-native-src-addr=http://localhost:8528 \
  --vm-native-dst-addr=http://localhost:8428 \
  --vm-native-filter-match='{job="vmagent"}' \
  --vm-native-filter-time-start='2020-01-01T20:07:00Z'
VictoriaMetrics Native import mode
Initing export pipe from "http://localhost:8528" with filters:
        filter: match[]={job="vmagent"}
Initing import process to "http://localhost:8428":
Total: 336.75 KiB ↖ Speed: 454.46 KiB p/s
2020/10/13 17:04:59 Total time: 952.143376ms
```

Importing tips:
1. Migrating all the metrics from one VM to another may collide with existing application metrics
   (prefixed with `vm_`) at the destination and lead to confusion when using
   [official Grafana dashboards](https://grafana.com/orgs/victoriametrics/dashboards).
   To avoid this situation, try to filter out VM process metrics via the `--vm-native-filter-match` flag.
2. Migration is a backfilling process, so it is recommended to read the
   [Backfilling tips](https://github.com/VictoriaMetrics/VictoriaMetrics#backfilling) section.
3. `vmctl` doesn't provide relabeling or other types of label management in this mode.
   Instead, use [relabeling in VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl/issues/4#issuecomment-683424375).


## Tuning

### Influx mode

The `--influx-concurrency` flag controls how many concurrent requests may be sent to InfluxDB while fetching
timeseries. Please set it wisely to avoid overwhelming InfluxDB.

The `--influx-chunk-size` flag controls the maximum number of datapoints to return in a single chunk from fetch requests.
Please see more details [here](https://docs.influxdata.com/influxdb/v1.7/guides/querying_data/#chunking).
The chunk size is used to control InfluxDB memory usage, so it won't OOM when processing large timeseries with
billions of datapoints.
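
For example, a run that tunes both of these knobs might look like this (the flag values here are purely illustrative, not recommendations):
```
./vmctl influx --influx-database benchmark \
  --influx-concurrency=4 \
  --influx-chunk-size=10000
```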

### Prometheus mode

The `--prom-concurrency` flag controls how many concurrent readers will be reading the blocks in the snapshot.
Since snapshots are just files on disk, it is hard to overwhelm the system. A value equal
to the number of free CPU cores is a good starting point.

### VictoriaMetrics importer

The `--vm-concurrency` flag controls the number of concurrent workers that process the input from the InfluxDB query results.
Please note that each import request can load up to a single vCPU core on VictoriaMetrics. So try to set it according
to the allocated CPU resources of your VictoriaMetrics installation.

The `--vm-batch-size` flag controls the maximum number of samples collected before sending the import request.
For example, if `--influx-chunk-size=500` and `--vm-batch-size=2000`, then the importer will process no more
than 4 chunks before sending the request.

### Importer stats

After a successful import, `vmctl` prints some statistics. The important numbers to watch are the following:
- `idle duration` - shows the time the importer spent while waiting for data from InfluxDB/Prometheus
  to fill up the `--vm-batch-size` batch size. The value shows the total duration across all workers configured
  via `--vm-concurrency`. A high value may be a sign of too slow InfluxDB/Prometheus fetches or of a too
  high `--vm-concurrency` value. Try to improve it by increasing the `--<mode>-concurrency` value or
  decreasing the `--vm-concurrency` value.
- `import requests` - shows how many import requests were issued to the VM server.
  An import request is issued once the batch (`--vm-batch-size`) is full and ready to be sent.
  Please prefer big batch sizes (50k-500k) to improve performance.
- `import requests retries` - shows the number of unsuccessful import requests. A non-zero value may be
  a sign of network issues or of VM being overloaded. See the logs during import for error messages.

### Silent mode

By default, `vmctl` waits for confirmation from the user before starting the import. If this behavior is unwanted
and no user interaction is required, pass the `-s` flag to enable "silent" mode:
```
  -s Whether to run in silent mode. If set to true no confirmation prompts will appear. (default: false)
```
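
For example, to run an InfluxDB migration without any prompts (flags as documented above):
```
./vmctl influx --influx-database benchmark -s
```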

### Significant figures

`vmctl` allows limiting the number of [significant figures](https://en.wikipedia.org/wiki/Significant_figures)
before importing. For example, the average value for response size is `102.342305` bytes and it has 9 significant figures.
If you ask a human to pronounce this value, it will most likely be rounded to the first 4 or 5 figures,
because the rest aren't really that important to mention. In most cases, such high precision is too much.
Moreover, such values may be just a result of [floating point arithmetic](https://en.wikipedia.org/wiki/Floating-point_arithmetic),
create a [false precision](https://en.wikipedia.org/wiki/False_precision) and result in a bad compression ratio
according to [information theory](https://en.wikipedia.org/wiki/Information_theory).

The `--vm-significant-figures` flag allows limiting the number of significant figures. It has no effect if set
to 0 (the default), but with `--vm-significant-figures=5` the value `102.342305` will be rounded to `102.34`. Such a value will
have a much higher compression ratio compared to the previous one and will save some extra disk space after the migration.
The most common case for using this flag is to reduce the number of significant figures for time series storing aggregation
results such as `average`, `rate`, etc.
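
A minimal Go sketch of rounding to N significant figures (a generic illustration, not the exact `decimal.Round` routine used by the importer):
```go
package main

import (
	"fmt"
	"math"
)

// roundToSignificantFigures rounds v so that only n significant figures remain.
func roundToSignificantFigures(v float64, n int) float64 {
	if v == 0 || n <= 0 {
		return v
	}
	// Scale v so that its n significant digits sit left of the decimal point,
	// round to the nearest integer, then scale back.
	magnitude := math.Ceil(math.Log10(math.Abs(v)))
	scale := math.Pow(10, float64(n)-magnitude)
	return math.Round(v*scale) / scale
}

func main() {
	fmt.Println(roundToSignificantFigures(102.342305, 5)) // 102.34
}
```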

### Adding extra labels

`vmctl` allows adding extra labels to all imported series. This can be achieved with the `--vm-extra-label label=value` flag.
If multiple labels need to be added, set the flag for each label, for example, `--vm-extra-label label1=value1 --vm-extra-label label2=value2`.
If a timeseries already has a label that is also set via the `--vm-extra-label` flag, the flag takes priority and will override the label value from the timeseries.
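
For example (the label names and values here are illustrative), passing
```
--vm-extra-label label1=value1 --vm-extra-label label2=value2
```
makes the importer send data to `/api/v1/import?extra_label=label1=value1&extra_label=label2=value2`, as exercised by the `AddExtraLabelsToImportPath` unit test shown earlier.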
4  go.mod
@@ -12,8 +12,12 @@ require (
	github.com/VictoriaMetrics/metricsql v0.10.0
	github.com/aws/aws-sdk-go v1.36.25
	github.com/cespare/xxhash/v2 v2.1.1
	github.com/cheggaaa/pb/v3 v3.0.5
	github.com/golang/snappy v0.0.2
	github.com/influxdata/influxdb v1.8.3
	github.com/klauspost/compress v1.11.6
	github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
	github.com/urfave/cli/v2 v2.3.0
	github.com/valyala/fastjson v1.6.3
	github.com/valyala/fastrand v1.0.0
	github.com/valyala/fasttemplate v1.2.1
2  vendor/github.com/VividCortex/ewma/.gitignore  generated vendored Normal file
@@ -0,0 +1,2 @@
.DS_Store
.*.sw?
21  vendor/github.com/VividCortex/ewma/LICENSE  generated vendored Normal file
@@ -0,0 +1,21 @@
The MIT License

Copyright (c) 2013 VividCortex

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
140  vendor/github.com/VividCortex/ewma/README.md  generated vendored Normal file
@@ -0,0 +1,140 @@
# EWMA [![GoDoc](https://godoc.org/github.com/VividCortex/ewma?status.svg)](https://godoc.org/github.com/VividCortex/ewma) ![Build Status](https://circleci.com/gh/VividCortex/moving_average.png?circle-token=1459fa37f9ca0e50cef05d1963146d96d47ea523)

This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our
Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/).

### Exponentially Weighted Moving Average

An exponentially weighted moving average is a way to continuously compute a type of
average for a series of numbers, as the numbers arrive. After a value in the series is
added to the average, its weight in the average decreases exponentially over time. This
biases the average towards more recent data. EWMAs are useful for several reasons, chiefly
their inexpensive computational and memory cost, as well as the fact that they represent
the recent central tendency of the series of values.

The EWMA algorithm requires a decay factor, alpha. The larger the alpha, the more the average
is biased towards recent history. The alpha must be between 0 and 1, and is typically
a fairly small number, such as 0.04. We will discuss the choice of alpha later.

The algorithm works thus, in pseudocode (see the Go sketch after this list):

1. Multiply the next number in the series by alpha.
2. Multiply the current value of the average by 1 minus alpha.
3. Add the result of steps 1 and 2, and store it as the new current value of the average.
4. Repeat for each number in the series.
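
A minimal Go sketch of these four steps (a standalone illustration, not this package's implementation):
```go
package main

import "fmt"

func main() {
	series := []float64{300, 280, 320, 310, 290}
	alpha := 0.5 // decay factor, chosen for simplicity as in the pictorial example below

	avg := series[0] // initialize with the first value (one of the strategies described next)
	for _, x := range series[1:] {
		// new average = alpha*x + (1-alpha)*old average
		avg = alpha*x + (1-alpha)*avg
	}
	fmt.Println(avg) // 298.75
}
```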

There are special-case behaviors for how to initialize the current value, and these vary
between implementations. One approach is to start with the first value in the series;
another is to average the first 10 or so values in the series using an arithmetic average,
and then begin the incremental updating of the average. Each method has pros and cons.

It may help to look at it pictorially. Suppose the series has five numbers, and we choose
alpha to be 0.50 for simplicity. Here's the series, with numbers in the neighborhood of 300.

![Data Series](https://user-images.githubusercontent.com/279875/28242350-463289a2-6977-11e7-88ca-fd778ccef1f0.png)

Now let's take the moving average of those numbers. First we set the average to the value
of the first number.

![EWMA Step 1](https://user-images.githubusercontent.com/279875/28242353-464c96bc-6977-11e7-9981-dc4e0789c7ba.png)

Next we multiply the next number by alpha, multiply the current value by 1-alpha, and add
them to generate a new value.

![EWMA Step 2](https://user-images.githubusercontent.com/279875/28242351-464abefa-6977-11e7-95d0-43900f29bef2.png)

This continues until we are done.

![EWMA Step N](https://user-images.githubusercontent.com/279875/28242352-464c58f0-6977-11e7-8cd0-e01e4efaac7f.png)

Notice how each of the values in the series decays by half each time a new value
is added, and the top of the bars in the lower portion of the image represents the
size of the moving average. It is a smoothed, or low-pass, average of the original
series.

For further reading, see [Exponentially weighted moving average](http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) on wikipedia.

### Choosing Alpha

Consider a fixed-size sliding-window moving average (not an exponentially weighted moving average)
that averages over the previous N samples. What is the average age of each sample? It is N/2.

Now suppose that you wish to construct an EWMA whose samples have the same average age. The formula
to compute the alpha required for this is: alpha = 2/(N+1). Proof is in the book
"Production and Operations Analysis" by Steven Nahmias.

So, for example, if you have a time-series with samples once per second, and you want to get the
moving average over the previous minute, you should use an alpha of .032786885. This, by the way,
is the constant alpha used for this repository's SimpleEWMA.

### Implementations

This repository contains two implementations of the EWMA algorithm, with different properties.

The implementations all conform to the MovingAverage interface, and the constructor returns
that type.

Current implementations assume an implicit time interval of 1.0 between every sample added.
That is, the passage of time is treated as though it's the same as the arrival of samples.
If you need time-based decay when samples are not arriving precisely at set intervals, then
this package will not support your needs at present.

#### SimpleEWMA

A SimpleEWMA is designed for low CPU and memory consumption. It **will** have different behavior than the VariableEWMA
for multiple reasons. It has no warm-up period and it uses a constant
decay. These properties let it use less memory. It will also behave
differently when it's equal to zero, which is assumed to mean
uninitialized, so if a value is likely to actually become zero over time,
then any non-zero value will cause a sharp jump instead of a small change.

#### VariableEWMA

Unlike SimpleEWMA, this supports a custom age which must be stored, and thus uses more memory.
It also has a "warmup" time when you start adding values to it. It will report a value of 0.0
until you have added the required number of samples to it. It uses some memory to store the
number of samples added to it. As a result it uses a little over twice the memory of SimpleEWMA.

## Usage

### API Documentation

View the GoDoc generated documentation [here](http://godoc.org/github.com/VividCortex/ewma).

```go
package main

import "github.com/VividCortex/ewma"

func main() {
	samples := [100]float64{
		4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149,
	}

	e := ewma.NewMovingAverage()  //=> Returns a SimpleEWMA if called without params
	a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1)

	for _, f := range samples {
		e.Add(f)
		a.Add(f)
	}

	e.Value() //=> 13.577404704631077
	a.Value() //=> 1.5806140565521463e-12
}
```

## Contributing

We only accept pull requests for minor fixes or improvements. This includes:

* Small bug fixes
* Typos
* Documentation or comments

Please open issues to discuss new features. Pull requests for new features will be rejected,
so we recommend forking the repository and making changes in your fork for your use case.

## License

This repository is Copyright (c) 2013 VividCortex, Inc. All rights reserved.
It is licensed under the MIT license. Please see the LICENSE file for applicable license terms.
126
vendor/github.com/VividCortex/ewma/ewma.go
generated
vendored
Normal file
126
vendor/github.com/VividCortex/ewma/ewma.go
generated
vendored
Normal file
|
@ -0,0 +1,126 @@
|
||||||
|
// Package ewma implements exponentially weighted moving averages.
|
||||||
|
package ewma
|
||||||
|
|
||||||
|
// Copyright (c) 2013 VividCortex, Inc. All rights reserved.
|
||||||
|
// Please see the LICENSE file for applicable license terms.
|
||||||
|
|
||||||
|
const (
|
||||||
|
// By default, we average over a one-minute period, which means the average
|
||||||
|
// age of the metrics in the period is 30 seconds.
|
||||||
|
AVG_METRIC_AGE float64 = 30.0
|
||||||
|
|
||||||
|
// The formula for computing the decay factor from the average age comes
|
||||||
|
// from "Production and Operations Analysis" by Steven Nahmias.
|
||||||
|
DECAY float64 = 2 / (float64(AVG_METRIC_AGE) + 1)
|
||||||
|
|
||||||
|
// For best results, the moving average should not be initialized to the
|
||||||
|
// samples it sees immediately. The book "Production and Operations
|
||||||
|
// Analysis" by Steven Nahmias suggests initializing the moving average to
|
||||||
|
// the mean of the first 10 samples. Until the VariableEwma has seen this
|
||||||
|
// many samples, it is not "ready" to be queried for the value of the
|
||||||
|
// moving average. This adds some memory cost.
|
||||||
|
WARMUP_SAMPLES uint8 = 10
|
||||||
|
)
|
||||||
|
|
||||||
|
// MovingAverage is the interface that computes a moving average over a time-
|
||||||
|
// series stream of numbers. The average may be over a window or exponentially
|
||||||
|
// decaying.
|
||||||
|
type MovingAverage interface {
	Add(float64)
	Value() float64
	Set(float64)
}

// NewMovingAverage constructs a MovingAverage that computes an average with the
// desired characteristics in the moving window or exponential decay. If no
// age is given, it constructs a default exponentially weighted implementation
// that consumes minimal memory. The age is related to the decay factor alpha
// by the formula given for the DECAY constant. It signifies the average age
// of the samples as time goes to infinity.
func NewMovingAverage(age ...float64) MovingAverage {
	if len(age) == 0 || age[0] == AVG_METRIC_AGE {
		return new(SimpleEWMA)
	}
	return &VariableEWMA{
		decay: 2 / (age[0] + 1),
	}
}

// A SimpleEWMA represents the exponentially weighted moving average of a
// series of numbers. It WILL have different behavior than the VariableEWMA
// for multiple reasons. It has no warm-up period and it uses a constant
// decay. These properties let it use less memory. It will also behave
// differently when it's equal to zero, which is assumed to mean
// uninitialized, so if a value is likely to actually become zero over time,
// then any non-zero value will cause a sharp jump instead of a small change.
// However, note that this takes a long time, and the value may just decay
// to a stable value that's close to zero, but which won't be mistaken
// for uninitialized. See http://play.golang.org/p/litxBDr_RC for example.
type SimpleEWMA struct {
	// The current value of the average. After adding with Add(), this is
	// updated to reflect the average of all values seen thus far.
	value float64
}

// Add adds a value to the series and updates the moving average.
func (e *SimpleEWMA) Add(value float64) {
	if e.value == 0 { // this is a proxy for "uninitialized"
		e.value = value
	} else {
		e.value = (value * DECAY) + (e.value * (1 - DECAY))
	}
}

// Value returns the current value of the moving average.
func (e *SimpleEWMA) Value() float64 {
	return e.value
}

// Set sets the EWMA's value.
func (e *SimpleEWMA) Set(value float64) {
	e.value = value
}

// VariableEWMA represents the exponentially weighted moving average of a series of
// numbers. Unlike SimpleEWMA, it supports a custom age, and thus uses more memory.
type VariableEWMA struct {
	// The multiplier factor by which the previous samples decay.
	decay float64
	// The current value of the average.
	value float64
	// The number of samples added to this instance.
	count uint8
}

// Add adds a value to the series and updates the moving average.
func (e *VariableEWMA) Add(value float64) {
	switch {
	case e.count < WARMUP_SAMPLES:
		e.count++
		e.value += value
	case e.count == WARMUP_SAMPLES:
		e.count++
		e.value = e.value / float64(WARMUP_SAMPLES)
		e.value = (value * e.decay) + (e.value * (1 - e.decay))
	default:
		e.value = (value * e.decay) + (e.value * (1 - e.decay))
	}
}

// Value returns the current value of the average, or 0.0 if the series hasn't
// warmed up yet.
func (e *VariableEWMA) Value() float64 {
	if e.count <= WARMUP_SAMPLES {
		return 0.0
	}

	return e.value
}

// Set sets the EWMA's value.
func (e *VariableEWMA) Set(value float64) {
	e.value = value
	if e.count <= WARMUP_SAMPLES {
		e.count = WARMUP_SAMPLES + 1
	}
}
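Editorial aside, not part of the upstream diff: the constructor above maps a desired average age N to the decay factor alpha = 2/(N+1). A minimal, self-contained usage sketch of the exported API (the age and sample values are illustrative; WARMUP_SAMPLES is defined earlier in this file, so Value() stays 0 until warm-up completes):

package main

import (
	"fmt"

	"github.com/VividCortex/ewma"
)

func main() {
	// VariableEWMA with decay = 2/(15+1); no argument gives the default SimpleEWMA.
	avg := ewma.NewMovingAverage(15)
	for i := 0; i < 50; i++ {
		avg.Add(float64(i % 10)) // feed enough samples to pass the warm-up period
	}
	fmt.Printf("ewma: %.2f\n", avg.Value())
}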
20 vendor/github.com/beorn7/perks/LICENSE generated vendored Normal file
@@ -0,0 +1,20 @@
Copyright (C) 2013 Blake Mizerany

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
2388 vendor/github.com/beorn7/perks/quantile/exampledata.txt generated vendored Normal file
File diff suppressed because it is too large.
316 vendor/github.com/beorn7/perks/quantile/stream.go generated vendored Normal file
@@ -0,0 +1,316 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile

import (
	"math"
	"sort"
)

// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
	Value float64 `json:",string"`
	Width float64 `json:",string"`
	Delta float64 `json:",string"`
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int           { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

type invariant func(s *stream, r float64) float64

// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * r
	}
	return newStream(ƒ)
}

// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * (s.n - r)
	}
	return newStream(ƒ)
}

// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targetMap map[float64]float64) *Stream {
	// Convert map to slice to avoid slow iterations on a map.
	// ƒ is called on the hot path, so converting the map to a slice
	// beforehand results in significant CPU savings.
	targets := targetMapToSlice(targetMap)

	ƒ := func(s *stream, r float64) float64 {
		var m = math.MaxFloat64
		var f float64
		for _, t := range targets {
			if t.quantile*s.n <= r {
				f = (2 * t.epsilon * r) / t.quantile
			} else {
				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
			}
			if f < m {
				m = f
			}
		}
		return m
	}
	return newStream(ƒ)
}

type target struct {
	quantile float64
	epsilon  float64
}

func targetMapToSlice(targetMap map[float64]float64) []target {
	targets := make([]target, 0, len(targetMap))

	for quantile, epsilon := range targetMap {
		t := target{
			quantile: quantile,
			epsilon:  epsilon,
		}
		targets = append(targets, t)
	}

	return targets
}

// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream
	b      Samples
	sorted bool
}

func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	s.insert(Sample{Value: v, Width: 1})
}

func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}

// Query returns the computed qth percentile value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
	if !s.flushed() {
		// Fast path when there hasn't been enough data for a flush;
		// this also yields better accuracy for small sets of data.
		l := len(s.b)
		if l == 0 {
			return 0
		}
		i := int(math.Ceil(float64(l) * q))
		if i > 0 {
			i -= 1
		}
		s.maybeSort()
		return s.b[i].Value
	}
	s.flush()
	return s.stream.query(q)
}

// Merge merges samples into the underlying stream's samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	sort.Sort(samples)
	s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		return s.b
	}
	s.flush()
	return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	return len(s.b) + s.stream.count()
}

func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}

type stream struct {
	n float64
	l []Sample
	ƒ invariant
}

func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}

func (s *stream) merge(samples Samples) {
	// TODO(beorn7): This tries to merge not only individual samples, but
	// whole summaries. The paper doesn't mention merging summaries at
	// all. Unittests show that the merging is inaccurate. Find out how to
	// do merges properly.
	var r float64
	i := 0
	for _, sample := range samples {
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
		i++
	inserted:
		s.n += sample.Width
		r += sample.Width
	}
	s.compress()
}

func (s *stream) count() int {
	return int(s.n)
}

func (s *stream) query(q float64) float64 {
	t := math.Ceil(q * s.n)
	t += math.Ceil(s.ƒ(s, t) / 2)
	p := s.l[0]
	var r float64
	for _, c := range s.l[1:] {
		r += p.Width
		if r+c.Width+c.Delta > t {
			return p.Value
		}
		p = c
	}
	return p.Value
}

func (s *stream) compress() {
	if len(s.l) < 2 {
		return
	}
	x := s.l[len(s.l)-1]
	xi := len(s.l) - 1
	r := s.n - 1 - x.Width

	for i := len(s.l) - 2; i >= 0; i-- {
		c := s.l[i]
		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
			x.Width += c.Width
			s.l[xi] = x
			// Remove element at i.
			copy(s.l[i:], s.l[i+1:])
			s.l = s.l[:len(s.l)-1]
			xi -= 1
		} else {
			x = c
			xi = i
		}
		r -= c.Width
	}
}

func (s *stream) samples() Samples {
	samples := make(Samples, len(s.l))
	copy(samples, s.l)
	return samples
}
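Editorial aside, not part of the upstream diff: a minimal, self-contained sketch of the NewTargeted/Insert/Query API above. The quantiles and error bounds are illustrative values only:

package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and the 99th percentile with absolute errors
	// 0.05 and 0.001, per the NewTargeted contract above.
	q := quantile.NewTargeted(map[float64]float64{0.5: 0.05, 0.99: 0.001})
	for i := 1; i <= 10000; i++ {
		q.Insert(float64(i))
	}
	fmt.Println(q.Query(0.5), q.Query(0.99)) // approximately 5000 and 9900
}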
12 vendor/github.com/cheggaaa/pb/v3/LICENSE generated vendored Normal file
@@ -0,0 +1,12 @@
Copyright (c) 2012-2015, Sergey Cherepanov
All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
290 vendor/github.com/cheggaaa/pb/v3/element.go generated vendored Normal file
@@ -0,0 +1,290 @@
package pb

import (
	"bytes"
	"fmt"
	"math"
	"sync"
	"time"
)

const (
	adElPlaceholder    = "%_ad_el_%"
	adElPlaceholderLen = len(adElPlaceholder)
)

var (
	defaultBarEls = [5]string{"[", "-", ">", "_", "]"}
)

// Element is an interface for bar elements
type Element interface {
	ProgressElement(state *State, args ...string) string
}

// ElementFunc type implements the Element interface and exists to simplify element definitions
type ElementFunc func(state *State, args ...string) string

// ProgressElement just calls the wrapped func
func (e ElementFunc) ProgressElement(state *State, args ...string) string {
	return e(state, args...)
}

var elementsM sync.Mutex

var elements = map[string]Element{
	"percent":  ElementPercent,
	"counters": ElementCounters,
	"bar":      adaptiveWrap(ElementBar),
	"speed":    ElementSpeed,
	"rtime":    ElementRemainingTime,
	"etime":    ElementElapsedTime,
	"string":   ElementString,
	"cycle":    ElementCycle,
}

// RegisterElement gives you a chance to use custom elements
func RegisterElement(name string, el Element, adaptive bool) {
	if adaptive {
		el = adaptiveWrap(el)
	}
	elementsM.Lock()
	elements[name] = el
	elementsM.Unlock()
}

type argsHelper []string

func (args argsHelper) getOr(n int, value string) string {
	if len(args) > n {
		return args[n]
	}
	return value
}

func (args argsHelper) getNotEmptyOr(n int, value string) (v string) {
	if v = args.getOr(n, value); v == "" {
		return value
	}
	return
}

func adaptiveWrap(el Element) Element {
	return ElementFunc(func(state *State, args ...string) string {
		state.recalc = append(state.recalc, ElementFunc(func(s *State, _ ...string) (result string) {
			s.adaptive = true
			result = el.ProgressElement(s, args...)
			s.adaptive = false
			return
		}))
		return adElPlaceholder
	})
}

// ElementPercent shows current percent of progress.
// Optionally can take one or two string arguments.
// First string will be used as value for format float64, default is "%.02f%%".
// Second string will be used when percent can't be calculated, default is "?%".
// In template use as follows: {{percent .}} or {{percent . "%.03f%%"}} or {{percent . "%.03f%%" "?"}}
var ElementPercent ElementFunc = func(state *State, args ...string) string {
	argsh := argsHelper(args)
	if state.Total() > 0 {
		return fmt.Sprintf(
			argsh.getNotEmptyOr(0, "%.02f%%"),
			float64(state.Value())/(float64(state.Total())/float64(100)),
		)
	}
	return argsh.getOr(1, "?%")
}

// ElementCounters shows current and total values.
// Optionally can take one or two string arguments.
// First string will be used as format value when Total is present (>0). Default is "%s / %s".
// Second string will be used when total <= 0. Default is "%[1]s".
// In template use as follows: {{counters .}} or {{counters . "%s/%s"}} or {{counters . "%s/%s" "%s/?"}}
var ElementCounters ElementFunc = func(state *State, args ...string) string {
	var f string
	if state.Total() > 0 {
		f = argsHelper(args).getNotEmptyOr(0, "%s / %s")
	} else {
		f = argsHelper(args).getNotEmptyOr(1, "%[1]s")
	}
	return fmt.Sprintf(f, state.Format(state.Value()), state.Format(state.Total()))
}

type elementKey int

const (
	barObj elementKey = iota
	speedObj
	cycleObj
)

type bar struct {
	eb  [5][]byte // elements in bytes
	cc  [5]int    // cell counts
	buf *bytes.Buffer
}

func (p *bar) write(state *State, eln, width int) int {
	repeat := width / p.cc[eln]
	for i := 0; i < repeat; i++ {
		p.buf.Write(p.eb[eln])
	}
	StripStringToBuffer(string(p.eb[eln]), width%p.cc[eln], p.buf)
	return width
}

func getProgressObj(state *State, args ...string) (p *bar) {
	var ok bool
	if p, ok = state.Get(barObj).(*bar); !ok {
		p = &bar{
			buf: bytes.NewBuffer(nil),
		}
		state.Set(barObj, p)
	}
	argsH := argsHelper(args)
	for i := range p.eb {
		arg := argsH.getNotEmptyOr(i, defaultBarEls[i])
		if string(p.eb[i]) != arg {
			p.cc[i] = CellCount(arg)
			p.eb[i] = []byte(arg)
			if p.cc[i] == 0 {
				p.cc[i] = 1
				p.eb[i] = []byte(" ")
			}
		}
	}
	return
}

// ElementBar makes the progress bar view [-->__]
// Optionally can take up to 5 string arguments. Defaults are "[", "-", ">", "_", "]".
// In template use as follows: {{bar . }} or {{bar . "<" "oOo" "|" "~" ">"}}
// Color args: {{bar . (red "[") (green "-") ...
var ElementBar ElementFunc = func(state *State, args ...string) string {
	// init
	var p = getProgressObj(state, args...)

	total, value := state.Total(), state.Value()
	if total < 0 {
		total = -total
	}
	if value < 0 {
		value = -value
	}

	// check for overflow
	if total != 0 && value > total {
		total = value
	}

	p.buf.Reset()

	var widthLeft = state.AdaptiveElWidth()
	if widthLeft <= 0 || !state.IsAdaptiveWidth() {
		widthLeft = 30
	}

	// write left border
	if p.cc[0] < widthLeft {
		widthLeft -= p.write(state, 0, p.cc[0])
	} else {
		p.write(state, 0, widthLeft)
		return p.buf.String()
	}

	// check right border size
	if p.cc[4] < widthLeft {
		// write later
		widthLeft -= p.cc[4]
	} else {
		p.write(state, 4, widthLeft)
		return p.buf.String()
	}

	var curCount int

	if total > 0 {
		// calculate the count of current space
		curCount = int(math.Ceil((float64(value) / float64(total)) * float64(widthLeft)))
	}

	// write bar
	if total == value && state.IsFinished() {
		widthLeft -= p.write(state, 1, curCount)
	} else if toWrite := curCount - p.cc[2]; toWrite > 0 {
		widthLeft -= p.write(state, 1, toWrite)
		widthLeft -= p.write(state, 2, p.cc[2])
	} else if curCount > 0 {
		widthLeft -= p.write(state, 2, curCount)
	}
	if widthLeft > 0 {
		widthLeft -= p.write(state, 3, widthLeft)
	}
	// write right border
	p.write(state, 4, p.cc[4])
	// cut result and return string
	return p.buf.String()
}

// ElementRemainingTime calculates remaining time based on speed (EWMA)
// Optionally can take one, two or three string arguments.
// First string will be used as value for format time duration string, default is "%s".
// Second string will be used when bar is finished and the value indicates elapsed time, default is "%s".
// Third string will be used when value is not available, default is "?".
// In template use as follows: {{rtime .}} or {{rtime . "%s remain"}} or {{rtime . "%s remain" "%s total" "???"}}
var ElementRemainingTime ElementFunc = func(state *State, args ...string) string {
	var rts string
	sp := getSpeedObj(state).value(state)
	if !state.IsFinished() {
		if sp > 0 {
			remain := float64(state.Total() - state.Value())
			remainDur := time.Duration(remain/sp) * time.Second
			rts = remainDur.String()
		} else {
			return argsHelper(args).getOr(2, "?")
		}
	} else {
		rts = state.Time().Truncate(time.Second).Sub(state.StartTime().Truncate(time.Second)).String()
		return fmt.Sprintf(argsHelper(args).getOr(1, "%s"), rts)
	}
	return fmt.Sprintf(argsHelper(args).getOr(0, "%s"), rts)
}

// ElementElapsedTime shows elapsed time
// Optionally can take one argument: the format for the time string.
// In template use as follows: {{etime .}} or {{etime . "%s elapsed"}}
var ElementElapsedTime ElementFunc = func(state *State, args ...string) string {
	etm := state.Time().Truncate(time.Second).Sub(state.StartTime().Truncate(time.Second))
	return fmt.Sprintf(argsHelper(args).getOr(0, "%s"), etm.String())
}

// ElementString gets a value from the bar by the given key and prints it
// bar.Set("myKey", "string to print")
// In template use as follows: {{string . "myKey"}}
var ElementString ElementFunc = func(state *State, args ...string) string {
	if len(args) == 0 {
		return ""
	}
	v := state.Get(args[0])
	if v == nil {
		return ""
	}
	return fmt.Sprint(v)
}

// ElementCycle returns the next argument on every call
// In template use as follows: {{cycle . "1" "2" "3"}}
// Or mix with other elements: {{ bar . "" "" (cycle . "↖" "↗" "↘" "↙" )}}
var ElementCycle ElementFunc = func(state *State, args ...string) string {
	if len(args) == 0 {
		return ""
	}
	n, _ := state.Get(cycleObj).(int)
	if n >= len(args) {
		n = 0
	}
	state.Set(cycleObj, n+1)
	return args[n]
}
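Editorial aside, not part of the upstream diff: a self-contained sketch composing the elements above into a custom template string. ProgressBarTemplate is defined in template.go further down in this diff; the bar glyphs are arbitrary:

package main

import "github.com/cheggaaa/pb/v3"

func main() {
	// counters, bar with custom borders/fill, percent and an ETA element
	tmpl := `{{counters . }} {{bar . "<" "=" ">" "." ">"}} {{percent . }} {{rtime . "ETA %s"}}`
	bar := pb.ProgressBarTemplate(tmpl).Start(100)
	for i := 0; i < 100; i++ {
		bar.Increment()
	}
	bar.Finish()
}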
11 vendor/github.com/cheggaaa/pb/v3/go.mod generated vendored Normal file
@@ -0,0 +1,11 @@
module github.com/cheggaaa/pb/v3

require (
	github.com/VividCortex/ewma v1.1.1
	github.com/fatih/color v1.7.0
	github.com/mattn/go-colorable v0.1.2
	github.com/mattn/go-isatty v0.0.12
	github.com/mattn/go-runewidth v0.0.7
)

go 1.12
21 vendor/github.com/cheggaaa/pb/v3/go.sum generated vendored Normal file
@@ -0,0 +1,21 @@
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 h1:ZBzSG/7F4eNKz2L3GE9o300RX0Az1Bw5HF7PDraD+qU=
golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
49 vendor/github.com/cheggaaa/pb/v3/io.go generated vendored Normal file
@@ -0,0 +1,49 @@
package pb

import (
	"io"
)

// Reader is a wrapper for the given reader with a progress handle
type Reader struct {
	io.Reader
	bar *ProgressBar
}

// Read reads bytes from the wrapped reader and adds the number of bytes read to the progress bar
func (r *Reader) Read(p []byte) (n int, err error) {
	n, err = r.Reader.Read(p)
	r.bar.Add(n)
	return
}

// Close closes the wrapped reader when it implements io.Closer
func (r *Reader) Close() (err error) {
	r.bar.Finish()
	if closer, ok := r.Reader.(io.Closer); ok {
		return closer.Close()
	}
	return
}

// Writer is a wrapper for the given writer with a progress handle
type Writer struct {
	io.Writer
	bar *ProgressBar
}

// Write writes bytes to the wrapped writer and adds the number of bytes written to the progress bar
func (r *Writer) Write(p []byte) (n int, err error) {
	n, err = r.Writer.Write(p)
	r.bar.Add(n)
	return
}

// Close closes the wrapped writer when it implements io.Closer
func (r *Writer) Close() (err error) {
	r.bar.Finish()
	if closer, ok := r.Writer.(io.Closer); ok {
		return closer.Close()
	}
	return
}
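Editorial aside, not part of the upstream diff: a self-contained sketch of the Writer wrapper above. pb.Full and NewProxyWriter are defined in preset.go and pb.go further down in this diff; NewProxyWriter also switches the bar to byte units:

package main

import (
	"io"
	"io/ioutil"
	"strings"

	"github.com/cheggaaa/pb/v3"
)

func main() {
	src := strings.NewReader(strings.Repeat("x", 1<<20)) // 1 MiB of dummy data
	bar := pb.Full.Start64(int64(src.Len()))
	w := bar.NewProxyWriter(ioutil.Discard) // progress advances as bytes are written
	io.Copy(w, src)
	bar.Finish()
}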
566 vendor/github.com/cheggaaa/pb/v3/pb.go generated vendored Normal file
@@ -0,0 +1,566 @@
package pb

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"text/template"
	"time"

	"github.com/mattn/go-colorable"
	"github.com/mattn/go-isatty"

	"github.com/cheggaaa/pb/v3/termutil"
)

// Version of ProgressBar library
const Version = "3.0.5"

type key int

const (
	// Bytes means we're working with byte sizes. Numbers will print as Kb, Mb, etc
	// bar.Set(pb.Bytes, true)
	Bytes key = 1 << iota

	// SIBytesPrefix uses SI bytes prefix names (kB, MB, etc) instead of IEC prefix names (KiB, MiB, etc)
	SIBytesPrefix

	// Terminal means we will print to a terminal and can use ascii sequences.
	// Also we will try to use the terminal width
	Terminal

	// Static means the progress bar will not update automatically
	Static

	// ReturnSymbol - by default in terminal mode it's '\r'
	ReturnSymbol

	// Color by default is true when output is a tty; set it to false to disable colors
	Color
)

const (
	defaultBarWidth    = 100
	defaultRefreshRate = time.Millisecond * 200
)

// New creates a new ProgressBar object
func New(total int) *ProgressBar {
	return New64(int64(total))
}

// New64 creates a new ProgressBar object using int64 as total
func New64(total int64) *ProgressBar {
	pb := new(ProgressBar)
	return pb.SetTotal(total)
}

// StartNew starts a new ProgressBar with the Default template
func StartNew(total int) *ProgressBar {
	return New(total).Start()
}

// Start64 starts a new ProgressBar with the Default template, using int64 as total.
func Start64(total int64) *ProgressBar {
	return New64(total).Start()
}

var (
	terminalWidth    = termutil.TerminalWidth
	isTerminal       = isatty.IsTerminal
	isCygwinTerminal = isatty.IsCygwinTerminal
)

// ProgressBar is the main object of bar
type ProgressBar struct {
	current, total int64
	width          int
	maxWidth       int
	mu             sync.RWMutex
	rm             sync.Mutex
	vars           map[interface{}]interface{}
	elements       map[string]Element
	output         io.Writer
	coutput        io.Writer
	nocoutput      io.Writer
	startTime      time.Time
	refreshRate    time.Duration
	tmpl           *template.Template
	state          *State
	buf            *bytes.Buffer
	ticker         *time.Ticker
	finish         chan struct{}
	finished       bool
	configured     bool
	err            error
}

func (pb *ProgressBar) configure() {
	if pb.configured {
		return
	}
	pb.configured = true

	if pb.vars == nil {
		pb.vars = make(map[interface{}]interface{})
	}
	if pb.output == nil {
		pb.output = os.Stderr
	}

	if pb.tmpl == nil {
		pb.tmpl, pb.err = getTemplate(string(Default))
		if pb.err != nil {
			return
		}
	}
	if pb.vars[Terminal] == nil {
		if f, ok := pb.output.(*os.File); ok {
			if isTerminal(f.Fd()) || isCygwinTerminal(f.Fd()) {
				pb.vars[Terminal] = true
			}
		}
	}
	if pb.vars[ReturnSymbol] == nil {
		if tm, ok := pb.vars[Terminal].(bool); ok && tm {
			pb.vars[ReturnSymbol] = "\r"
		}
	}
	if pb.vars[Color] == nil {
		if tm, ok := pb.vars[Terminal].(bool); ok && tm {
			pb.vars[Color] = true
		}
	}
	if pb.refreshRate == 0 {
		pb.refreshRate = defaultRefreshRate
	}
	if f, ok := pb.output.(*os.File); ok {
		pb.coutput = colorable.NewColorable(f)
	} else {
		pb.coutput = pb.output
	}
	pb.nocoutput = colorable.NewNonColorable(pb.output)
}

// Start starts the bar
func (pb *ProgressBar) Start() *ProgressBar {
	pb.mu.Lock()
	defer pb.mu.Unlock()
	if pb.finish != nil {
		return pb
	}
	pb.configure()
	pb.finished = false
	pb.state = nil
	pb.startTime = time.Now()
	if st, ok := pb.vars[Static].(bool); ok && st {
		return pb
	}
	pb.finish = make(chan struct{})
	pb.ticker = time.NewTicker(pb.refreshRate)
	go pb.writer(pb.finish)
	return pb
}

func (pb *ProgressBar) writer(finish chan struct{}) {
	for {
		select {
		case <-pb.ticker.C:
			pb.write(false)
		case <-finish:
			pb.ticker.Stop()
			pb.write(true)
			finish <- struct{}{}
			return
		}
	}
}

// Write performs a write to the output
func (pb *ProgressBar) Write() *ProgressBar {
	pb.mu.RLock()
	finished := pb.finished
	pb.mu.RUnlock()
	pb.write(finished)
	return pb
}

func (pb *ProgressBar) write(finish bool) {
	result, width := pb.render()
	if pb.Err() != nil {
		return
	}
	if pb.GetBool(Terminal) {
		if r := (width - CellCount(result)); r > 0 {
			result += strings.Repeat(" ", r)
		}
	}
	if ret, ok := pb.Get(ReturnSymbol).(string); ok {
		result = ret + result
		if finish && ret == "\r" {
			result += "\n"
		}
	}
	if pb.GetBool(Color) {
		pb.coutput.Write([]byte(result))
	} else {
		pb.nocoutput.Write([]byte(result))
	}
}

// Total returns the current total bar value
func (pb *ProgressBar) Total() int64 {
	return atomic.LoadInt64(&pb.total)
}

// SetTotal sets the total bar value
func (pb *ProgressBar) SetTotal(value int64) *ProgressBar {
	atomic.StoreInt64(&pb.total, value)
	return pb
}

// SetCurrent sets the current bar value
func (pb *ProgressBar) SetCurrent(value int64) *ProgressBar {
	atomic.StoreInt64(&pb.current, value)
	return pb
}

// Current returns the current bar value
func (pb *ProgressBar) Current() int64 {
	return atomic.LoadInt64(&pb.current)
}

// Add64 adds the given int64 value to the bar value
func (pb *ProgressBar) Add64(value int64) *ProgressBar {
	atomic.AddInt64(&pb.current, value)
	return pb
}

// Add adds the given int value to the bar value
func (pb *ProgressBar) Add(value int) *ProgressBar {
	return pb.Add64(int64(value))
}

// Increment atomically increments the progress
func (pb *ProgressBar) Increment() *ProgressBar {
	return pb.Add64(1)
}

// Set sets any value by any key
func (pb *ProgressBar) Set(key, value interface{}) *ProgressBar {
	pb.mu.Lock()
	defer pb.mu.Unlock()
	if pb.vars == nil {
		pb.vars = make(map[interface{}]interface{})
	}
	pb.vars[key] = value
	return pb
}

// Get returns a value by key
func (pb *ProgressBar) Get(key interface{}) interface{} {
	pb.mu.RLock()
	defer pb.mu.RUnlock()
	if pb.vars == nil {
		return nil
	}
	return pb.vars[key]
}

// GetBool returns a value by key and tries to convert it to a boolean.
// If the value isn't set or isn't a boolean, it returns false.
func (pb *ProgressBar) GetBool(key interface{}) bool {
	if v, ok := pb.Get(key).(bool); ok {
		return v
	}
	return false
}

// SetWidth sets the bar width.
// When the given value is <= 0, the terminal width (if possible) or the default value is used.
func (pb *ProgressBar) SetWidth(width int) *ProgressBar {
	pb.mu.Lock()
	pb.width = width
	pb.mu.Unlock()
	return pb
}

// SetMaxWidth sets the bar maximum width.
// When the given value is <= 0, the terminal width (if possible) or the default value is used.
func (pb *ProgressBar) SetMaxWidth(maxWidth int) *ProgressBar {
	pb.mu.Lock()
	pb.maxWidth = maxWidth
	pb.mu.Unlock()
	return pb
}

// Width returns the bar width.
// It's the current terminal width or the value set via 'SetWidth'.
func (pb *ProgressBar) Width() (width int) {
	defer func() {
		if r := recover(); r != nil {
			width = defaultBarWidth
		}
	}()
	pb.mu.RLock()
	width = pb.width
	maxWidth := pb.maxWidth
	pb.mu.RUnlock()
	if width <= 0 {
		var err error
		if width, err = terminalWidth(); err != nil {
			return defaultBarWidth
		}
	}
	if maxWidth > 0 && width > maxWidth {
		width = maxWidth
	}
	return
}

// SetRefreshRate sets the bar refresh rate when the given duration is positive
func (pb *ProgressBar) SetRefreshRate(dur time.Duration) *ProgressBar {
	pb.mu.Lock()
	if dur > 0 {
		pb.refreshRate = dur
	}
	pb.mu.Unlock()
	return pb
}

// SetWriter sets the io.Writer. The bar will write to this writer.
// By default this is os.Stderr
func (pb *ProgressBar) SetWriter(w io.Writer) *ProgressBar {
	pb.mu.Lock()
	pb.output = w
	pb.configured = false
	pb.configure()
	pb.mu.Unlock()
	return pb
}

// StartTime returns the time when the bar started
func (pb *ProgressBar) StartTime() time.Time {
	pb.mu.RLock()
	defer pb.mu.RUnlock()
	return pb.startTime
}

// Format converts an int64 to a string according to the current settings
func (pb *ProgressBar) Format(v int64) string {
	if pb.GetBool(Bytes) {
		return formatBytes(v, pb.GetBool(SIBytesPrefix))
	}
	return strconv.FormatInt(v, 10)
}

// Finish stops the bar
func (pb *ProgressBar) Finish() *ProgressBar {
	pb.mu.Lock()
	if pb.finished {
		pb.mu.Unlock()
		return pb
	}
	finishChan := pb.finish
	pb.finished = true
	pb.mu.Unlock()
	if finishChan != nil {
		finishChan <- struct{}{}
		<-finishChan
		pb.mu.Lock()
		pb.finish = nil
		pb.mu.Unlock()
	}
	return pb
}

// IsStarted indicates progress bar state
func (pb *ProgressBar) IsStarted() bool {
	pb.mu.RLock()
	defer pb.mu.RUnlock()
	return pb.finish != nil
}

// SetTemplateString sets the ProgressBar template string and parses it
func (pb *ProgressBar) SetTemplateString(tmpl string) *ProgressBar {
	pb.mu.Lock()
	defer pb.mu.Unlock()
	pb.tmpl, pb.err = getTemplate(tmpl)
	return pb
}

// SetTemplate sets the ProgressBarTemplate and parses it
func (pb *ProgressBar) SetTemplate(tmpl ProgressBarTemplate) *ProgressBar {
	return pb.SetTemplateString(string(tmpl))
}

// NewProxyReader creates a wrapper for the given reader with a progress handle.
// Takes io.Reader or io.ReadCloser.
// Also, it automatically switches the progress bar to handle units as bytes.
func (pb *ProgressBar) NewProxyReader(r io.Reader) *Reader {
	pb.Set(Bytes, true)
	return &Reader{r, pb}
}

// NewProxyWriter creates a wrapper for the given writer with a progress handle.
// Takes io.Writer or io.WriteCloser.
// Also, it automatically switches the progress bar to handle units as bytes.
func (pb *ProgressBar) NewProxyWriter(r io.Writer) *Writer {
	pb.Set(Bytes, true)
	return &Writer{r, pb}
}

func (pb *ProgressBar) render() (result string, width int) {
	defer func() {
		if r := recover(); r != nil {
			pb.SetErr(fmt.Errorf("render panic: %v", r))
		}
	}()
	pb.rm.Lock()
	defer pb.rm.Unlock()
	pb.mu.Lock()
	pb.configure()
	if pb.state == nil {
		pb.state = &State{ProgressBar: pb}
		pb.buf = bytes.NewBuffer(nil)
	}
	if pb.startTime.IsZero() {
		pb.startTime = time.Now()
	}
	pb.state.id++
	pb.state.finished = pb.finished
	pb.state.time = time.Now()
	pb.mu.Unlock()

	pb.state.width = pb.Width()
	width = pb.state.width
	pb.state.total = pb.Total()
	pb.state.current = pb.Current()
	pb.buf.Reset()

	if e := pb.tmpl.Execute(pb.buf, pb.state); e != nil {
		pb.SetErr(e)
		return "", 0
	}

	result = pb.buf.String()

	aec := len(pb.state.recalc)
	if aec == 0 {
		// no adaptive elements
		return
	}

	staticWidth := CellCount(result) - (aec * adElPlaceholderLen)

	if pb.state.Width()-staticWidth <= 0 {
		result = strings.Replace(result, adElPlaceholder, "", -1)
		result = StripString(result, pb.state.Width())
	} else {
		pb.state.adaptiveElWidth = (width - staticWidth) / aec
		for _, el := range pb.state.recalc {
			result = strings.Replace(result, adElPlaceholder, el.ProgressElement(pb.state), 1)
		}
	}
	pb.state.recalc = pb.state.recalc[:0]
	return
}

// SetErr sets an error to the ProgressBar.
// The error will be available over Err()
func (pb *ProgressBar) SetErr(err error) *ProgressBar {
	pb.mu.Lock()
	pb.err = err
	pb.mu.Unlock()
	return pb
}

// Err returns a possible error.
// When all is ok, it will be nil.
// May contain template.Execute errors.
func (pb *ProgressBar) Err() error {
	pb.mu.RLock()
	defer pb.mu.RUnlock()
	return pb.err
}

// String returns the current string representation of the ProgressBar
func (pb *ProgressBar) String() string {
	res, _ := pb.render()
	return res
}

// ProgressElement implements the Element interface
func (pb *ProgressBar) ProgressElement(s *State, args ...string) string {
	if s.IsAdaptiveWidth() {
		pb.SetWidth(s.AdaptiveElWidth())
	}
	return pb.String()
}

// State represents the current state of the bar.
// Needed for bar elements.
type State struct {
	*ProgressBar

	id                     uint64
	total, current         int64
	width, adaptiveElWidth int
	finished, adaptive     bool
	time                   time.Time

	recalc []Element
}

// Id is the current state identifier
// - incremental
// - starts with 1
// - resets after finish/start
func (s *State) Id() uint64 {
	return s.id
}

// Total is the bar's int64 total
func (s *State) Total() int64 {
	return s.total
}

// Value is the current value
func (s *State) Value() int64 {
	return s.current
}

// Width of bar
func (s *State) Width() int {
	return s.width
}

// AdaptiveElWidth - adaptive elements must return a string with the given cell count (when AdaptiveElWidth > 0)
func (s *State) AdaptiveElWidth() int {
	return s.adaptiveElWidth
}

// IsAdaptiveWidth returns true when the element must be shown as adaptive
func (s *State) IsAdaptiveWidth() bool {
	return s.adaptive
}

// IsFinished returns true when the bar is finished
func (s *State) IsFinished() bool {
	return s.finished
}

// IsFirst returns true only on the first render
func (s *State) IsFirst() bool {
	return s.id == 1
}

// Time when state was created
func (s *State) Time() time.Time {
	return s.time
}
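Editorial aside, not part of the upstream diff: the canonical usage of the API above, self-contained. StartNew uses the Default template and refreshes automatically at the default 200ms rate; Finish stops the writer goroutine and prints the final state:

package main

import (
	"time"

	"github.com/cheggaaa/pb/v3"
)

func main() {
	count := 100
	bar := pb.StartNew(count)
	for i := 0; i < count; i++ {
		time.Sleep(10 * time.Millisecond) // simulated work
		bar.Increment()
	}
	bar.Finish()
}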
15 vendor/github.com/cheggaaa/pb/v3/preset.go generated vendored Normal file
@@ -0,0 +1,15 @@
package pb

var (
	// Full - preset with all default available elements
	// Example: 'Prefix 20/100 [-->______] 20% 1 p/s ETA 1m Suffix'
	Full ProgressBarTemplate = `{{string . "prefix"}}{{counters . }} {{bar . }} {{percent . }} {{speed . }} {{rtime . "ETA %s"}}{{string . "suffix"}}`

	// Default - preset like Full but without the ETA element
	// Example: 'Prefix 20/100 [-->______] 20% 1 p/s Suffix'
	Default ProgressBarTemplate = `{{string . "prefix"}}{{counters . }} {{bar . }} {{percent . }} {{speed . }}{{string . "suffix"}}`

	// Simple - preset without speed or any timers; only counters, bar and percent
	// Example: 'Prefix 20/100 [-->______] 20% Suffix'
	Simple ProgressBarTemplate = `{{string . "prefix"}}{{counters . }} {{bar . }} {{percent . }}{{string . "suffix"}}`
)
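Editorial aside, not part of the upstream diff: each preset above is a ProgressBarTemplate, so it can start a bar directly via the Start/Start64 methods defined in template.go below. A minimal sketch:

package main

import "github.com/cheggaaa/pb/v3"

func main() {
	bar := pb.Simple.Start(250) // counters, bar and percent only
	for i := 0; i < 250; i++ {
		bar.Increment()
	}
	bar.Finish()
}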
83 vendor/github.com/cheggaaa/pb/v3/speed.go generated vendored Normal file
@@ -0,0 +1,83 @@
package pb

import (
	"fmt"
	"math"
	"time"

	"github.com/VividCortex/ewma"
)

var speedAddLimit = time.Second / 2

type speed struct {
	ewma                  ewma.MovingAverage
	lastStateId           uint64
	prevValue, startValue int64
	prevTime, startTime   time.Time
}

func (s *speed) value(state *State) float64 {
	if s.ewma == nil {
		s.ewma = ewma.NewMovingAverage()
	}
	if state.IsFirst() || state.Id() < s.lastStateId {
		s.reset(state)
		return 0
	}
	if state.Id() == s.lastStateId {
		return s.ewma.Value()
	}
	if state.IsFinished() {
		return s.absValue(state)
	}
	dur := state.Time().Sub(s.prevTime)
	if dur < speedAddLimit {
		return s.ewma.Value()
	}
	diff := math.Abs(float64(state.Value() - s.prevValue))
	lastSpeed := diff / dur.Seconds()
	s.prevTime = state.Time()
	s.prevValue = state.Value()
	s.lastStateId = state.Id()
	s.ewma.Add(lastSpeed)
	return s.ewma.Value()
}

func (s *speed) reset(state *State) {
	s.lastStateId = state.Id()
	s.startTime = state.Time()
	s.prevTime = state.Time()
	s.startValue = state.Value()
	s.prevValue = state.Value()
	s.ewma = ewma.NewMovingAverage()
}

func (s *speed) absValue(state *State) float64 {
	if dur := state.Time().Sub(s.startTime); dur > 0 {
		return float64(state.Value()) / dur.Seconds()
	}
	return 0
}

func getSpeedObj(state *State) (s *speed) {
	if sObj, ok := state.Get(speedObj).(*speed); ok {
		return sObj
	}
	s = new(speed)
	state.Set(speedObj, s)
	return
}

// ElementSpeed calculates the current speed using an EWMA
// Optionally can take one or two string arguments.
// First string will be used as the format for the speed value, default is "%s p/s".
// Second string will be used when speed is not available, default is "? p/s".
// In template use as follows: {{speed .}} or {{speed . "%s per second"}} or {{speed . "%s ps" "..."}}
var ElementSpeed ElementFunc = func(state *State, args ...string) string {
	sp := getSpeedObj(state).value(state)
	if sp == 0 {
		return argsHelper(args).getNotEmptyOr(1, "? p/s")
	}
	return fmt.Sprintf(argsHelper(args).getNotEmptyOr(0, "%s p/s"), state.Format(int64(round(sp))))
}
88 vendor/github.com/cheggaaa/pb/v3/template.go generated vendored Normal file
@@ -0,0 +1,88 @@
package pb

import (
	"math/rand"
	"sync"
	"text/template"

	"github.com/fatih/color"
)

// ProgressBarTemplate is a bar template string
type ProgressBarTemplate string

// New creates a new bar from the template
func (pbt ProgressBarTemplate) New(total int) *ProgressBar {
	return New(total).SetTemplate(pbt)
}

// Start64 creates and starts a new bar with the given int64 total value
func (pbt ProgressBarTemplate) Start64(total int64) *ProgressBar {
	return New64(total).SetTemplate(pbt).Start()
}

// Start creates and starts a new bar with the given int total value
func (pbt ProgressBarTemplate) Start(total int) *ProgressBar {
	return pbt.Start64(int64(total))
}

var templateCacheMu sync.Mutex
var templateCache = make(map[string]*template.Template)

var defaultTemplateFuncs = template.FuncMap{
	// colors
	"black":      color.New(color.FgBlack).SprintFunc(),
	"red":        color.New(color.FgRed).SprintFunc(),
	"green":      color.New(color.FgGreen).SprintFunc(),
	"yellow":     color.New(color.FgYellow).SprintFunc(),
	"blue":       color.New(color.FgBlue).SprintFunc(),
	"magenta":    color.New(color.FgMagenta).SprintFunc(),
	"cyan":       color.New(color.FgCyan).SprintFunc(),
	"white":      color.New(color.FgWhite).SprintFunc(),
	"resetcolor": color.New(color.Reset).SprintFunc(),
	"rndcolor":   rndcolor,
	"rnd":        rnd,
}

func getTemplate(tmpl string) (t *template.Template, err error) {
	templateCacheMu.Lock()
	defer templateCacheMu.Unlock()
	t = templateCache[tmpl]
	if t != nil {
		// found in cache
		return
	}
	t = template.New("")
	fillTemplateFuncs(t)
	_, err = t.Parse(tmpl)
	if err != nil {
		t = nil
		return
	}
	templateCache[tmpl] = t
	return
}

func fillTemplateFuncs(t *template.Template) {
	t.Funcs(defaultTemplateFuncs)
	emf := make(template.FuncMap)
	elementsM.Lock()
	for k, v := range elements {
		emf[k] = v
	}
	elementsM.Unlock()
	t.Funcs(emf)
	return
}

func rndcolor(s string) string {
	c := rand.Intn(int(color.FgWhite-color.FgBlack)) + int(color.FgBlack)
	return color.New(color.Attribute(c)).Sprint(s)
}

func rnd(args ...string) string {
	if len(args) == 0 {
		return ""
	}
	return args[rand.Intn(len(args))]
}
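Editorial aside, not part of the upstream diff: a self-contained sketch combining RegisterElement from element.go with the template wiring above. The "spin" element name and frames are arbitrary; the frame advances with the render state id:

package main

import "github.com/cheggaaa/pb/v3"

func main() {
	// Register a non-adaptive custom element before parsing any template that uses it.
	pb.RegisterElement("spin", pb.ElementFunc(func(state *pb.State, args ...string) string {
		frames := []string{"|", "/", "-", "\\"}
		return frames[int(state.Id())%len(frames)]
	}), false)
	bar := pb.ProgressBarTemplate(`{{spin . }} {{bar . }} {{percent . }}`).Start(100)
	for i := 0; i < 100; i++ {
		bar.Increment()
	}
	bar.Finish()
}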
56 vendor/github.com/cheggaaa/pb/v3/termutil/term.go generated vendored Normal file
@@ -0,0 +1,56 @@
package termutil

import (
	"errors"
	"os"
	"os/signal"
	"sync"
)

var echoLocked bool
var echoLockMutex sync.Mutex
var errLocked = errors.New("terminal locked")

// RawModeOn switches the terminal to raw mode
func RawModeOn() (quit chan struct{}, err error) {
	echoLockMutex.Lock()
	defer echoLockMutex.Unlock()
	if echoLocked {
		err = errLocked
		return
	}
	if err = lockEcho(); err != nil {
		return
	}
	echoLocked = true
	quit = make(chan struct{}, 1)
	go catchTerminate(quit)
	return
}

// RawModeOff restores the previous terminal state
func RawModeOff() (err error) {
	echoLockMutex.Lock()
	defer echoLockMutex.Unlock()
	if !echoLocked {
		return
	}
	if err = unlockEcho(); err != nil {
		return
	}
	echoLocked = false
	return
}

// catchTerminate listens for exit signals and restores the terminal state
func catchTerminate(quit chan struct{}) {
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, unlockSignals...)
	defer signal.Stop(sig)
	select {
	case <-quit:
		RawModeOff()
	case <-sig:
		RawModeOff()
	}
}
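A sketch of how RawModeOn and RawModeOff are meant to be paired; the caller code here is hypothetical, but the double-restore safety follows from the `echoLocked` guard above (a second RawModeOff is a no-op).

```go
package main

import (
	"log"
	"time"

	"github.com/cheggaaa/pb/v3/termutil"
)

func main() {
	quit, err := termutil.RawModeOn()
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		close(quit)               // wakes catchTerminate so it stops watching signals
		_ = termutil.RawModeOff() // no-op if catchTerminate already restored the terminal
	}()

	time.Sleep(2 * time.Second) // placeholder for work done while echo is disabled
}
```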
11 vendor/github.com/cheggaaa/pb/v3/termutil/term_appengine.go generated vendored Normal file
@@ -0,0 +1,11 @@
// +build appengine

package termutil

import "errors"

// TerminalWidth returns the width of the terminal. This is not supported
// and always fails on appengine classic, which is a sandboxed PaaS.
func TerminalWidth() (int, error) {
	return 0, errors.New("Not supported")
}
9 vendor/github.com/cheggaaa/pb/v3/termutil/term_bsd.go generated vendored Normal file
@@ -0,0 +1,9 @@
// +build darwin freebsd netbsd openbsd dragonfly
// +build !appengine

package termutil

import "syscall"

const ioctlReadTermios = syscall.TIOCGETA
const ioctlWriteTermios = syscall.TIOCSETA
7 vendor/github.com/cheggaaa/pb/v3/termutil/term_linux.go generated vendored Normal file
@@ -0,0 +1,7 @@
// +build linux
// +build !appengine

package termutil

const ioctlReadTermios = 0x5401 // syscall.TCGETS
const ioctlWriteTermios = 0x5402 // syscall.TCSETS
8 vendor/github.com/cheggaaa/pb/v3/termutil/term_nix.go generated vendored Normal file
@@ -0,0 +1,8 @@
// +build linux darwin freebsd netbsd openbsd dragonfly
// +build !appengine

package termutil

import "syscall"

const sysIoctl = syscall.SYS_IOCTL
50 vendor/github.com/cheggaaa/pb/v3/termutil/term_plan9.go generated vendored Normal file
@@ -0,0 +1,50 @@
package termutil

import (
	"errors"
	"os"
	"syscall"
)

var (
	consctl *os.File

	// Plan 9 doesn't have syscall.SIGQUIT
	unlockSignals = []os.Signal{
		os.Interrupt, syscall.SIGTERM, syscall.SIGKILL,
	}
)

// TerminalWidth returns width of the terminal.
func TerminalWidth() (int, error) {
	return 0, errors.New("Not supported")
}

func lockEcho() error {
	if consctl != nil {
		return errors.New("consctl already open")
	}
	var err error
	consctl, err = os.OpenFile("/dev/consctl", os.O_WRONLY, 0)
	if err != nil {
		return err
	}
	_, err = consctl.WriteString("rawon")
	if err != nil {
		consctl.Close()
		consctl = nil
		return err
	}
	return nil
}

func unlockEcho() error {
	if consctl == nil {
		return nil
	}
	if err := consctl.Close(); err != nil {
		return err
	}
	consctl = nil
	return nil
}
8 vendor/github.com/cheggaaa/pb/v3/termutil/term_solaris.go generated vendored Normal file
@@ -0,0 +1,8 @@
// +build solaris
// +build !appengine

package termutil

const ioctlReadTermios = 0x5401 // syscall.TCGETS
const ioctlWriteTermios = 0x5402 // syscall.TCSETS
const sysIoctl = 54
155 vendor/github.com/cheggaaa/pb/v3/termutil/term_win.go generated vendored Normal file
@@ -0,0 +1,155 @@
// +build windows

package termutil

import (
	"fmt"
	"os"
	"os/exec"
	"strconv"
	"syscall"
	"unsafe"
)

var (
	tty = os.Stdin

	unlockSignals = []os.Signal{
		os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL,
	}
)

var (
	kernel32 = syscall.NewLazyDLL("kernel32.dll")

	// GetConsoleScreenBufferInfo retrieves information about the
	// specified console screen buffer.
	// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx
	procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")

	// GetConsoleMode retrieves the current input mode of a console's
	// input buffer or the current output mode of a console screen buffer.
	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
	getConsoleMode = kernel32.NewProc("GetConsoleMode")

	// SetConsoleMode sets the input mode of a console's input buffer
	// or the output mode of a console screen buffer.
	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
	setConsoleMode = kernel32.NewProc("SetConsoleMode")

	// SetConsoleCursorPosition sets the cursor position in the
	// specified console screen buffer.
	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx
	setConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")

	mingw = isMingw()
)

type (
	// Defines the coordinates of the upper left and lower right corners
	// of a rectangle.
	// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311(v=vs.85).aspx
	smallRect struct {
		Left, Top, Right, Bottom int16
	}

	// Defines the coordinates of a character cell in a console screen
	// buffer. The origin of the coordinate system (0,0) is at the top, left cell
	// of the buffer.
	// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119(v=vs.85).aspx
	coordinates struct {
		X, Y int16
	}

	word int16

	// Contains information about a console screen buffer.
	// http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx
	consoleScreenBufferInfo struct {
		dwSize              coordinates
		dwCursorPosition    coordinates
		wAttributes         word
		srWindow            smallRect
		dwMaximumWindowSize coordinates
	}
)

// TerminalWidth returns width of the terminal.
func TerminalWidth() (width int, err error) {
	if mingw {
		return termWidthTPut()
	}
	return termWidthCmd()
}

func termWidthCmd() (width int, err error) {
	var info consoleScreenBufferInfo
	_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0)
	if e != 0 {
		return 0, error(e)
	}
	return int(info.dwSize.X) - 1, nil
}

func isMingw() bool {
	return os.Getenv("MINGW_PREFIX") != "" || os.Getenv("MSYSTEM") == "MINGW64"
}

func termWidthTPut() (width int, err error) {
	// TODO: maybe anybody knows a better way to get it on mintty...
	var res []byte
	cmd := exec.Command("tput", "cols")
	cmd.Stdin = os.Stdin
	if res, err = cmd.CombinedOutput(); err != nil {
		return 0, fmt.Errorf("%s: %v", string(res), err)
	}
	if len(res) > 1 {
		res = res[:len(res)-1]
	}
	return strconv.Atoi(string(res))
}

func getCursorPos() (pos coordinates, err error) {
	var info consoleScreenBufferInfo
	_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0)
	if e != 0 {
		return info.dwCursorPosition, error(e)
	}
	return info.dwCursorPosition, nil
}

func setCursorPos(pos coordinates) error {
	_, _, e := syscall.Syscall(setConsoleCursorPosition.Addr(), 2, uintptr(syscall.Stdout), uintptr(uint32(uint16(pos.Y))<<16|uint32(uint16(pos.X))), 0)
	if e != 0 {
		return error(e)
	}
	return nil
}

var oldState word

func lockEcho() (err error) {
	if _, _, e := syscall.Syscall(getConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&oldState)), 0); e != 0 {
		err = fmt.Errorf("Can't get terminal settings: %v", e)
		return
	}

	newState := oldState
	const ENABLE_ECHO_INPUT = 0x0004
	const ENABLE_LINE_INPUT = 0x0002
	newState = newState & (^(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT))
	if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(newState), 0); e != 0 {
		err = fmt.Errorf("Can't set terminal settings: %v", e)
		return
	}
	return
}

func unlockEcho() (err error) {
	if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(oldState), 0); e != 0 {
		err = fmt.Errorf("Can't set terminal settings")
	}
	return
}
76 vendor/github.com/cheggaaa/pb/v3/termutil/term_x.go generated vendored Normal file
@@ -0,0 +1,76 @@
// +build linux darwin freebsd netbsd openbsd solaris dragonfly
// +build !appengine

package termutil

import (
	"fmt"
	"os"
	"syscall"
	"unsafe"
)

var (
	tty *os.File

	unlockSignals = []os.Signal{
		os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL,
	}
)

type window struct {
	Row    uint16
	Col    uint16
	Xpixel uint16
	Ypixel uint16
}

func init() {
	var err error
	tty, err = os.Open("/dev/tty")
	if err != nil {
		tty = os.Stdin
	}
}

// TerminalWidth returns width of the terminal.
func TerminalWidth() (int, error) {
	w := new(window)
	res, _, err := syscall.Syscall(sysIoctl,
		tty.Fd(),
		uintptr(syscall.TIOCGWINSZ),
		uintptr(unsafe.Pointer(w)),
	)
	if int(res) == -1 {
		return 0, err
	}
	return int(w.Col), nil
}

var oldState syscall.Termios

func lockEcho() (err error) {
	fd := tty.Fd()
	if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 {
		err = fmt.Errorf("Can't get terminal settings: %v", e)
		return
	}

	newState := oldState
	newState.Lflag &^= syscall.ECHO
	newState.Lflag |= syscall.ICANON | syscall.ISIG
	newState.Iflag |= syscall.ICRNL
	if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); e != 0 {
		err = fmt.Errorf("Can't set terminal settings: %v", e)
		return
	}
	return
}

func unlockEcho() (err error) {
	fd := tty.Fd()
	if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 {
		err = fmt.Errorf("Can't set terminal settings")
	}
	return
}
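Pulling the platform-specific pieces together, TerminalWidth is the one portable entry point across these files. A plausible caller looks like the sketch below; the 80-column fallback is an assumption of this sketch, not library behavior.

```go
package main

import (
	"fmt"

	"github.com/cheggaaa/pb/v3/termutil"
)

func main() {
	width, err := termutil.TerminalWidth()
	if err != nil {
		width = 80 // assumed fallback, e.g. when stdout is not a TTY
	}
	fmt.Printf("rendering to %d columns\n", width)
}
```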
115 vendor/github.com/cheggaaa/pb/v3/util.go generated vendored Normal file
@@ -0,0 +1,115 @@
package pb

import (
	"bytes"
	"fmt"
	"github.com/mattn/go-runewidth"
	"math"
	"regexp"
	//"unicode/utf8"
)

const (
	_KiB = 1024
	_MiB = 1048576
	_GiB = 1073741824
	_TiB = 1099511627776

	_kB = 1e3
	_MB = 1e6
	_GB = 1e9
	_TB = 1e12
)

var ctrlFinder = regexp.MustCompile("\x1b\x5b[0-9]+\x6d")

// CellCount returns the number of terminal cells occupied by s, excluding
// any ANSI SGR escape sequences it contains.
func CellCount(s string) int {
	n := runewidth.StringWidth(s)
	for _, sm := range ctrlFinder.FindAllString(s, -1) {
		n -= runewidth.StringWidth(sm)
	}
	return n
}

// StripString truncates s to at most w terminal cells, preserving any
// embedded escape sequences.
func StripString(s string, w int) string {
	l := CellCount(s)
	if l <= w {
		return s
	}
	var buf = bytes.NewBuffer(make([]byte, 0, len(s)))
	StripStringToBuffer(s, w, buf)
	return buf.String()
}

func StripStringToBuffer(s string, w int, buf *bytes.Buffer) {
	var seqs = ctrlFinder.FindAllStringIndex(s, -1)
mainloop:
	for i, r := range s {
		for _, seq := range seqs {
			if i >= seq[0] && i < seq[1] {
				buf.WriteRune(r)
				continue mainloop
			}
		}
		if rw := CellCount(string(r)); rw <= w {
			w -= rw
			buf.WriteRune(r)
		} else {
			break
		}
	}
	for w > 0 {
		buf.WriteByte(' ')
		w--
	}
	return
}

func round(val float64) (newVal float64) {
	roundOn := 0.5
	places := 0
	var round float64
	pow := math.Pow(10, float64(places))
	digit := pow * val
	_, div := math.Modf(digit)
	if div >= roundOn {
		round = math.Ceil(digit)
	} else {
		round = math.Floor(digit)
	}
	newVal = round / pow
	return
}

// formatBytes converts a byte count to a human-readable string such as
// "2 MiB" or "64.2 KiB", or "2 MB" / "64.2 kB" if useSIPrefix is true.
func formatBytes(i int64, useSIPrefix bool) (result string) {
	if !useSIPrefix {
		switch {
		case i >= _TiB:
			result = fmt.Sprintf("%.02f TiB", float64(i)/_TiB)
		case i >= _GiB:
			result = fmt.Sprintf("%.02f GiB", float64(i)/_GiB)
		case i >= _MiB:
			result = fmt.Sprintf("%.02f MiB", float64(i)/_MiB)
		case i >= _KiB:
			result = fmt.Sprintf("%.02f KiB", float64(i)/_KiB)
		default:
			result = fmt.Sprintf("%d B", i)
		}
	} else {
		switch {
		case i >= _TB:
			result = fmt.Sprintf("%.02f TB", float64(i)/_TB)
		case i >= _GB:
			result = fmt.Sprintf("%.02f GB", float64(i)/_GB)
		case i >= _MB:
			result = fmt.Sprintf("%.02f MB", float64(i)/_MB)
		case i >= _kB:
			result = fmt.Sprintf("%.02f kB", float64(i)/_kB)
		default:
			result = fmt.Sprintf("%d B", i)
		}
	}
	return
}
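To make CellCount's escape-sequence handling concrete, a small sketch; the expected values are hand-derived from the `ctrlFinder` regexp and runewidth's treatment of control characters, not captured from an actual run.

```go
package main

import (
	"fmt"

	"github.com/cheggaaa/pb/v3"
)

func main() {
	plain := "12.3 MiB"
	colored := "\x1b[32m" + plain + "\x1b[0m" // same text wrapped in green SGR codes

	// ctrlFinder removes the SGR sequences from the width calculation, so
	// both strings should measure the same number of terminal cells.
	fmt.Println(pb.CellCount(plain))   // 8
	fmt.Println(pb.CellCount(colored)) // 8 as well, if the derivation holds
}
```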
21 vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md generated vendored Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Brian Goff

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
14 vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go generated vendored Normal file
@@ -0,0 +1,14 @@
package md2man

import (
	"github.com/russross/blackfriday/v2"
)

// Render converts a markdown document into a roff formatted document.
func Render(doc []byte) []byte {
	renderer := NewRoffRenderer()

	return blackfriday.Run(doc,
		[]blackfriday.Option{blackfriday.WithRenderer(renderer),
			blackfriday.WithExtensions(renderer.GetExtensions())}...)
}
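Render is the whole public surface of this package; typical usage looks like the sketch below. The markdown input is an invented example, but the Render signature is exactly the one shown in this diff.

```go
package main

import (
	"os"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	// Render drives blackfriday with the roffRenderer defined in roff.go
	// and returns the roff (manpage) output.
	src := []byte("# myapp 1 \"Jan 2020\"\n\n## SYNOPSIS\nmyapp [options]\n")
	os.Stdout.Write(md2man.Render(src))
}
```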
345 vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go generated vendored Normal file
@@ -0,0 +1,345 @@
package md2man

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/russross/blackfriday/v2"
)

// roffRenderer implements the blackfriday.Renderer interface for creating
// roff format (manpages) from markdown text
type roffRenderer struct {
	extensions   blackfriday.Extensions
	listCounters []int
	firstHeader  bool
	defineTerm   bool
	listDepth    int
}

const (
	titleHeader      = ".TH "
	topLevelHeader   = "\n\n.SH "
	secondLevelHdr   = "\n.SH "
	otherHeader      = "\n.SS "
	crTag            = "\n"
	emphTag          = "\\fI"
	emphCloseTag     = "\\fP"
	strongTag        = "\\fB"
	strongCloseTag   = "\\fP"
	breakTag         = "\n.br\n"
	paraTag          = "\n.PP\n"
	hruleTag         = "\n.ti 0\n\\l'\\n(.lu'\n"
	linkTag          = "\n\\[la]"
	linkCloseTag     = "\\[ra]"
	codespanTag      = "\\fB\\fC"
	codespanCloseTag = "\\fR"
	codeTag          = "\n.PP\n.RS\n\n.nf\n"
	codeCloseTag     = "\n.fi\n.RE\n"
	quoteTag         = "\n.PP\n.RS\n"
	quoteCloseTag    = "\n.RE\n"
	listTag          = "\n.RS\n"
	listCloseTag     = "\n.RE\n"
	arglistTag       = "\n.TP\n"
	tableStart       = "\n.TS\nallbox;\n"
	tableEnd         = ".TE\n"
	tableCellStart   = "T{\n"
	tableCellEnd     = "\nT}\n"
)

// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
// from markdown
func NewRoffRenderer() *roffRenderer { // nolint: golint
	var extensions blackfriday.Extensions

	extensions |= blackfriday.NoIntraEmphasis
	extensions |= blackfriday.Tables
	extensions |= blackfriday.FencedCode
	extensions |= blackfriday.SpaceHeadings
	extensions |= blackfriday.Footnotes
	extensions |= blackfriday.Titleblock
	extensions |= blackfriday.DefinitionLists
	return &roffRenderer{
		extensions: extensions,
	}
}

// GetExtensions returns the list of extensions used by this renderer implementation
func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
	return r.extensions
}

// RenderHeader handles outputting the header at document start
func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
	// disable hyphenation
	out(w, ".nh\n")
}

// RenderFooter handles outputting the footer at the document end; the roff
// renderer has no footer information
func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {
}

// RenderNode is called for each node in a markdown document; based on the node
// type the equivalent roff output is sent to the writer
func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {

	var walkAction = blackfriday.GoToNext

	switch node.Type {
	case blackfriday.Text:
		r.handleText(w, node, entering)
	case blackfriday.Softbreak:
		out(w, crTag)
	case blackfriday.Hardbreak:
		out(w, breakTag)
	case blackfriday.Emph:
		if entering {
			out(w, emphTag)
		} else {
			out(w, emphCloseTag)
		}
	case blackfriday.Strong:
		if entering {
			out(w, strongTag)
		} else {
			out(w, strongCloseTag)
		}
	case blackfriday.Link:
		if !entering {
			out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag)
		}
	case blackfriday.Image:
		// ignore images
		walkAction = blackfriday.SkipChildren
	case blackfriday.Code:
		out(w, codespanTag)
		escapeSpecialChars(w, node.Literal)
		out(w, codespanCloseTag)
	case blackfriday.Document:
		break
	case blackfriday.Paragraph:
		// roff .PP markers break lists
		if r.listDepth > 0 {
			return blackfriday.GoToNext
		}
		if entering {
			out(w, paraTag)
		} else {
			out(w, crTag)
		}
	case blackfriday.BlockQuote:
		if entering {
			out(w, quoteTag)
		} else {
			out(w, quoteCloseTag)
		}
	case blackfriday.Heading:
		r.handleHeading(w, node, entering)
	case blackfriday.HorizontalRule:
		out(w, hruleTag)
	case blackfriday.List:
		r.handleList(w, node, entering)
	case blackfriday.Item:
		r.handleItem(w, node, entering)
	case blackfriday.CodeBlock:
		out(w, codeTag)
		escapeSpecialChars(w, node.Literal)
		out(w, codeCloseTag)
	case blackfriday.Table:
		r.handleTable(w, node, entering)
	case blackfriday.TableCell:
		r.handleTableCell(w, node, entering)
	case blackfriday.TableHead:
	case blackfriday.TableBody:
	case blackfriday.TableRow:
		// no action as cell entries do all the nroff formatting
		return blackfriday.GoToNext
	default:
		fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
	}
	return walkAction
}

func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) {
	var (
		start, end string
	)
	// handle special roff table cell text encapsulation
	if node.Parent.Type == blackfriday.TableCell {
		if len(node.Literal) > 30 {
			start = tableCellStart
			end = tableCellEnd
		} else {
			// end rows that aren't terminated by "tableCellEnd" with a cr if end of row
			if node.Parent.Next == nil && !node.Parent.IsHeader {
				end = crTag
			}
		}
	}
	out(w, start)
	escapeSpecialChars(w, node.Literal)
	out(w, end)
}

func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) {
	if entering {
		switch node.Level {
		case 1:
			if !r.firstHeader {
				out(w, titleHeader)
				r.firstHeader = true
				break
			}
			out(w, topLevelHeader)
		case 2:
			out(w, secondLevelHdr)
		default:
			out(w, otherHeader)
		}
	}
}

func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) {
	openTag := listTag
	closeTag := listCloseTag
	if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
		// tags for definition lists handled within Item node
		openTag = ""
		closeTag = ""
	}
	if entering {
		r.listDepth++
		if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
			r.listCounters = append(r.listCounters, 1)
		}
		out(w, openTag)
	} else {
		if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
			r.listCounters = r.listCounters[:len(r.listCounters)-1]
		}
		out(w, closeTag)
		r.listDepth--
	}
}

func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) {
	if entering {
		if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
			out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1]))
			r.listCounters[len(r.listCounters)-1]++
		} else if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
			// state machine for handling terms and following definitions
			// since blackfriday does not distinguish them properly, nor
			// does it separate them into separate lists as it should
			if !r.defineTerm {
				out(w, arglistTag)
				r.defineTerm = true
			} else {
				r.defineTerm = false
			}
		} else {
			out(w, ".IP \\(bu 2\n")
		}
	} else {
		out(w, "\n")
	}
}

func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) {
	if entering {
		out(w, tableStart)
		// call walker to count cells (and rows?) so format section can be produced
		columns := countColumns(node)
		out(w, strings.Repeat("l ", columns)+"\n")
		out(w, strings.Repeat("l ", columns)+".\n")
	} else {
		out(w, tableEnd)
	}
}

func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) {
	var (
		start, end string
	)
	if node.IsHeader {
		start = codespanTag
		end = codespanCloseTag
	}
	if entering {
		if node.Prev != nil && node.Prev.Type == blackfriday.TableCell {
			out(w, "\t"+start)
		} else {
			out(w, start)
		}
	} else {
		// need to carriage return if we are at the end of the header row
		if node.IsHeader && node.Next == nil {
			end = end + crTag
		}
		out(w, end)
	}
}

// because roff format requires knowing the column count before outputting any table
// data we need to walk a table tree and count the columns
func countColumns(node *blackfriday.Node) int {
	var columns int

	node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		switch node.Type {
		case blackfriday.TableRow:
			if !entering {
				return blackfriday.Terminate
			}
		case blackfriday.TableCell:
			if entering {
				columns++
			}
		default:
		}
		return blackfriday.GoToNext
	})
	return columns
}

func out(w io.Writer, output string) {
	io.WriteString(w, output) // nolint: errcheck
}

func needsBackslash(c byte) bool {
	for _, r := range []byte("-_&\\~") {
		if c == r {
			return true
		}
	}
	return false
}

func escapeSpecialChars(w io.Writer, text []byte) {
	for i := 0; i < len(text); i++ {
		// escape initial apostrophe or period
		if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
			out(w, "\\&")
		}

		// directly copy normal characters
		org := i

		for i < len(text) && !needsBackslash(text[i]) {
			i++
		}
		if i > org {
			w.Write(text[org:i]) // nolint: errcheck
		}

		// escape a character
		if i >= len(text) {
			break
		}

		w.Write([]byte{'\\', text[i]}) // nolint: errcheck
	}
}
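Since handleTable must emit the column-format lines before any cell data, a two-column markdown table produces roff output shaped roughly like this. The snippet is hand-derived from the constants and handler logic above (tableStart, the two `strings.Repeat("l ", columns)` lines, codespan-wrapped header cells, tab-separated body cells), not captured from an actual run:

```
.TS
allbox;
l l 
l l .
\fB\fCName\fR	\fB\fCValue\fR
foo	1
.TE
```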
20 vendor/github.com/fatih/color/LICENSE.md generated vendored Normal file
@@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2013 Fatih Arslan

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
182 vendor/github.com/fatih/color/README.md generated vendored Normal file
@@ -0,0 +1,182 @@
# Archived project. No maintenance.

This project is not maintained anymore and is archived. Feel free to fork and
make your own changes if needed. For more detail read my blog post: [Taking an indefinite sabbatical from my projects](https://arslan.io/2018/10/09/taking-an-indefinite-sabbatical-from-my-projects/)

Thanks to everyone for their valuable feedback and contributions.

# Color [![GoDoc](https://godoc.org/github.com/fatih/color?status.svg)](https://godoc.org/github.com/fatih/color)

Color lets you use colorized outputs in terms of [ANSI Escape
Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It
has support for Windows too! The API can be used in several ways; pick one that
suits you.

![Color](https://i.imgur.com/c1JI0lA.png)

## Install

```bash
go get github.com/fatih/color
```

## Examples

### Standard colors

```go
// Print with default helper functions
color.Cyan("Prints text in cyan.")

// A newline will be appended automatically
color.Blue("Prints %s in blue.", "text")

// These are using the default foreground colors
color.Red("We have red")
color.Magenta("And many others ..")
```

### Mix and reuse colors

```go
// Create a new color object
c := color.New(color.FgCyan).Add(color.Underline)
c.Println("Prints cyan text with an underline.")

// Or just add them to New()
d := color.New(color.FgCyan, color.Bold)
d.Printf("This prints bold cyan %s\n", "too!.")

// Mix up foreground and background colors, create new mixes!
red := color.New(color.FgRed)

boldRed := red.Add(color.Bold)
boldRed.Println("This will print text in bold red.")

whiteBackground := red.Add(color.BgWhite)
whiteBackground.Println("Red text with white background.")
```

### Use your own output (io.Writer)

```go
// Use your own io.Writer output
color.New(color.FgBlue).Fprintln(myWriter, "blue color!")

blue := color.New(color.FgBlue)
blue.Fprint(writer, "This will print text in blue.")
```

### Custom print functions (PrintFunc)

```go
// Create a custom print function for convenience
red := color.New(color.FgRed).PrintfFunc()
red("Warning")
red("Error: %s", err)

// Mix up multiple attributes
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
notice("Don't forget this...")
```

### Custom fprint functions (FprintFunc)

```go
blue := color.New(FgBlue).FprintfFunc()
blue(myWriter, "important notice: %s", stars)

// Mix up with multiple attributes
success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
success(myWriter, "Don't forget this...")
```

### Insert into noncolor strings (SprintFunc)

```go
// Create SprintXxx functions to mix strings with other non-colorized strings:
yellow := color.New(color.FgYellow).SprintFunc()
red := color.New(color.FgRed).SprintFunc()
fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error"))

info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
fmt.Printf("This %s rocks!\n", info("package"))

// Use helper functions
fmt.Println("This", color.RedString("warning"), "should be not neglected.")
fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.")

// Windows supported too! Just don't forget to change the output to color.Output
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
```

### Plug into existing code

```go
// Use handy standard colors
color.Set(color.FgYellow)

fmt.Println("Existing text will now be in yellow")
fmt.Printf("This one %s\n", "too")

color.Unset() // Don't forget to unset

// You can mix up parameters
color.Set(color.FgMagenta, color.Bold)
defer color.Unset() // Use it in your function

fmt.Println("All text will now be bold magenta.")
```

### Disable/Enable color

There might be a case where you want to explicitly disable/enable color output. The
`go-isatty` package will automatically disable color output for non-tty output streams
(for example if the output were piped directly to `less`).

`Color` has support to disable/enable colors both globally and for single color
definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You
can easily disable the color output with:

```go
var flagNoColor = flag.Bool("no-color", false, "Disable color output")

if *flagNoColor {
	color.NoColor = true // disables colorized output
}
```

It also has support for single color definitions (local). You can
disable/enable color output on the fly:

```go
c := color.New(color.FgCyan)
c.Println("Prints cyan text")

c.DisableColor()
c.Println("This is printed without any color")

c.EnableColor()
c.Println("This prints again cyan...")
```

## Todo

* Save/Return previous values
* Evaluate fmt.Formatter interface

## Credits

* [Fatih Arslan](https://github.com/fatih)
* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable)

## License

The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details
603 vendor/github.com/fatih/color/color.go generated vendored Normal file
@@ -0,0 +1,603 @@
package color

import (
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"
	"sync"

	"github.com/mattn/go-colorable"
	"github.com/mattn/go-isatty"
)

var (
	// NoColor defines if the output is colorized or not. It's dynamically set to
	// false or true based on the stdout's file descriptor referring to a terminal
	// or not. This is a global option and affects all colors. For more control
	// over each color block use the methods DisableColor() individually.
	NoColor = os.Getenv("TERM") == "dumb" ||
		(!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()))

	// Output defines the standard output of the print functions. By default
	// os.Stdout is used.
	Output = colorable.NewColorableStdout()

	// Error defines a color supporting writer for os.Stderr.
	Error = colorable.NewColorableStderr()

	// colorsCache is used to reduce the count of created Color objects and
	// allows to reuse already created objects with required Attribute.
	colorsCache   = make(map[Attribute]*Color)
	colorsCacheMu sync.Mutex // protects colorsCache
)

// Color defines a custom color object which is defined by SGR parameters.
type Color struct {
	params  []Attribute
	noColor *bool
}

// Attribute defines a single SGR Code
type Attribute int

const escape = "\x1b"

// Base attributes
const (
	Reset Attribute = iota
	Bold
	Faint
	Italic
	Underline
	BlinkSlow
	BlinkRapid
	ReverseVideo
	Concealed
	CrossedOut
)

// Foreground text colors
const (
	FgBlack Attribute = iota + 30
	FgRed
	FgGreen
	FgYellow
	FgBlue
	FgMagenta
	FgCyan
	FgWhite
)

// Foreground Hi-Intensity text colors
const (
	FgHiBlack Attribute = iota + 90
	FgHiRed
	FgHiGreen
	FgHiYellow
	FgHiBlue
	FgHiMagenta
	FgHiCyan
	FgHiWhite
)

// Background text colors
const (
	BgBlack Attribute = iota + 40
	BgRed
	BgGreen
	BgYellow
	BgBlue
	BgMagenta
	BgCyan
	BgWhite
)

// Background Hi-Intensity text colors
const (
	BgHiBlack Attribute = iota + 100
	BgHiRed
	BgHiGreen
	BgHiYellow
	BgHiBlue
	BgHiMagenta
	BgHiCyan
	BgHiWhite
)

// New returns a newly created color object.
func New(value ...Attribute) *Color {
	c := &Color{params: make([]Attribute, 0)}
	c.Add(value...)
	return c
}

// Set sets the given parameters immediately. It will change the color of
// output with the given SGR parameters until color.Unset() is called.
func Set(p ...Attribute) *Color {
	c := New(p...)
	c.Set()
	return c
}

// Unset resets all escape attributes and clears the output. Usually should
// be called after Set().
func Unset() {
	if NoColor {
		return
	}

	fmt.Fprintf(Output, "%s[%dm", escape, Reset)
}

// Set sets the SGR sequence.
func (c *Color) Set() *Color {
	if c.isNoColorSet() {
		return c
	}

	fmt.Fprintf(Output, c.format())
	return c
}

func (c *Color) unset() {
	if c.isNoColorSet() {
		return
	}

	Unset()
}

func (c *Color) setWriter(w io.Writer) *Color {
	if c.isNoColorSet() {
		return c
	}

	fmt.Fprintf(w, c.format())
	return c
}

func (c *Color) unsetWriter(w io.Writer) {
	if c.isNoColorSet() {
		return
	}

	if NoColor {
		return
	}

	fmt.Fprintf(w, "%s[%dm", escape, Reset)
}

// Add is used to chain SGR parameters. Use as many parameters as needed to
// combine and create custom color objects. Example: Add(color.FgRed, color.Underline).
func (c *Color) Add(value ...Attribute) *Color {
	c.params = append(c.params, value...)
	return c
}

func (c *Color) prepend(value Attribute) {
	c.params = append(c.params, 0)
	copy(c.params[1:], c.params[0:])
	c.params[0] = value
}

// Fprint formats using the default formats for its operands and writes to w.
// Spaces are added between operands when neither is a string.
// It returns the number of bytes written and any write error encountered.
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
	c.setWriter(w)
	defer c.unsetWriter(w)

	return fmt.Fprint(w, a...)
}

// Print formats using the default formats for its operands and writes to
// standard output. Spaces are added between operands when neither is a
// string. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Print(a ...interface{}) (n int, err error) {
	c.Set()
	defer c.unset()

	return fmt.Fprint(Output, a...)
}

// Fprintf formats according to a format specifier and writes to w.
// It returns the number of bytes written and any write error encountered.
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
	c.setWriter(w)
	defer c.unsetWriter(w)

	return fmt.Fprintf(w, format, a...)
}

// Printf formats according to a format specifier and writes to standard output.
// It returns the number of bytes written and any write error encountered.
// This is the standard fmt.Printf() method wrapped with the given color.
func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
	c.Set()
	defer c.unset()

	return fmt.Fprintf(Output, format, a...)
}

// Fprintln formats using the default formats for its operands and writes to w.
// Spaces are always added between operands and a newline is appended.
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
	c.setWriter(w)
	defer c.unsetWriter(w)

	return fmt.Fprintln(w, a...)
}

// Println formats using the default formats for its operands and writes to
// standard output. Spaces are always added between operands and a newline is
// appended. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Println(a ...interface{}) (n int, err error) {
	c.Set()
	defer c.unset()

	return fmt.Fprintln(Output, a...)
}

// Sprint is just like Print, but returns a string instead of printing it.
func (c *Color) Sprint(a ...interface{}) string {
	return c.wrap(fmt.Sprint(a...))
}

// Sprintln is just like Println, but returns a string instead of printing it.
func (c *Color) Sprintln(a ...interface{}) string {
	return c.wrap(fmt.Sprintln(a...))
}

// Sprintf is just like Printf, but returns a string instead of printing it.
func (c *Color) Sprintf(format string, a ...interface{}) string {
	return c.wrap(fmt.Sprintf(format, a...))
}

// FprintFunc returns a new function that prints the passed arguments as
// colorized with color.Fprint().
func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) {
	return func(w io.Writer, a ...interface{}) {
		c.Fprint(w, a...)
	}
}

// PrintFunc returns a new function that prints the passed arguments as
// colorized with color.Print().
func (c *Color) PrintFunc() func(a ...interface{}) {
	return func(a ...interface{}) {
		c.Print(a...)
	}
}

// FprintfFunc returns a new function that prints the passed arguments as
// colorized with color.Fprintf().
func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) {
	return func(w io.Writer, format string, a ...interface{}) {
		c.Fprintf(w, format, a...)
	}
}

// PrintfFunc returns a new function that prints the passed arguments as
// colorized with color.Printf().
func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
	return func(format string, a ...interface{}) {
		c.Printf(format, a...)
	}
}

// FprintlnFunc returns a new function that prints the passed arguments as
// colorized with color.Fprintln().
func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) {
	return func(w io.Writer, a ...interface{}) {
		c.Fprintln(w, a...)
	}
}

// PrintlnFunc returns a new function that prints the passed arguments as
// colorized with color.Println().
func (c *Color) PrintlnFunc() func(a ...interface{}) {
	return func(a ...interface{}) {
		c.Println(a...)
	}
}

// SprintFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprint(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output, example:
//
//	put := New(FgYellow).SprintFunc()
//	fmt.Fprintf(color.Output, "This is a %s", put("warning"))
func (c *Color) SprintFunc() func(a ...interface{}) string {
	return func(a ...interface{}) string {
		return c.wrap(fmt.Sprint(a...))
	}
}

// SprintfFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintf(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
	return func(format string, a ...interface{}) string {
		return c.wrap(fmt.Sprintf(format, a...))
	}
}

// SprintlnFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintln(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintlnFunc() func(a ...interface{}) string {
	return func(a ...interface{}) string {
		return c.wrap(fmt.Sprintln(a...))
	}
}

// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
// escape; an example output might be: "1;36" -> bold cyan
func (c *Color) sequence() string {
	format := make([]string, len(c.params))
	for i, v := range c.params {
		format[i] = strconv.Itoa(int(v))
	}

	return strings.Join(format, ";")
}

// wrap wraps the s string with the colors attributes. The string is ready to
// be printed.
func (c *Color) wrap(s string) string {
	if c.isNoColorSet() {
		return s
	}

	return c.format() + s + c.unformat()
}

func (c *Color) format() string {
	return fmt.Sprintf("%s[%sm", escape, c.sequence())
}

func (c *Color) unformat() string {
	return fmt.Sprintf("%s[%dm", escape, Reset)
}

// DisableColor disables the color output. Useful to not change any existing
// code and still being able to output. Can be used for flags like
// "--no-color". To enable back use EnableColor() method.
func (c *Color) DisableColor() {
	c.noColor = boolPtr(true)
}

// EnableColor enables the color output. Use it in conjunction with
// DisableColor(). Otherwise this method has no side effects.
func (c *Color) EnableColor() {
	c.noColor = boolPtr(false)
}

func (c *Color) isNoColorSet() bool {
	// check first if we have a user-set preference
	if c.noColor != nil {
		return *c.noColor
	}

	// if not, return the global option, which is disabled by default
	return NoColor
}

// Equals returns a boolean value indicating whether two colors are equal.
func (c *Color) Equals(c2 *Color) bool {
	if len(c.params) != len(c2.params) {
		return false
	}

	for _, attr := range c.params {
		if !c2.attrExists(attr) {
			return false
		}
	}

	return true
}

func (c *Color) attrExists(a Attribute) bool {
	for _, attr := range c.params {
		if attr == a {
			return true
		}
	}

	return false
}

func boolPtr(v bool) *bool {
	return &v
}

func getCachedColor(p Attribute) *Color {
	colorsCacheMu.Lock()
	defer colorsCacheMu.Unlock()

	c, ok := colorsCache[p]
	if !ok {
		c = New(p)
		colorsCache[p] = c
	}

	return c
}

func colorPrint(format string, p Attribute, a ...interface{}) {
	c := getCachedColor(p)

	if !strings.HasSuffix(format, "\n") {
		format += "\n"
	}

	if len(a) == 0 {
		c.Print(format)
	} else {
		c.Printf(format, a...)
	}
}

func colorString(format string, p Attribute, a ...interface{}) string {
	c := getCachedColor(p)

	if len(a) == 0 {
		return c.SprintFunc()(format)
	}

	return c.SprintfFunc()(format, a...)
}

// Black is a convenient helper function to print with black foreground. A
// newline is appended to format by default.
func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) }

// Red is a convenient helper function to print with red foreground. A
// newline is appended to format by default.
func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) }

// Green is a convenient helper function to print with green foreground. A
// newline is appended to format by default.
func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) }

// Yellow is a convenient helper function to print with yellow foreground.
// A newline is appended to format by default.
func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) }

// Blue is a convenient helper function to print with blue foreground. A
// newline is appended to format by default.
func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) }

// Magenta is a convenient helper function to print with magenta foreground.
// A newline is appended to format by default.
func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) }

// Cyan is a convenient helper function to print with cyan foreground. A
// newline is appended to format by default.
func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) }

// White is a convenient helper function to print with white foreground. A
// newline is appended to format by default.
func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) }

// BlackString is a convenient helper function to return a string with black
// foreground.
func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) }

// RedString is a convenient helper function to return a string with red
// foreground.
func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) }

// GreenString is a convenient helper function to return a string with green
// foreground.
func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) }

// YellowString is a convenient helper function to return a string with yellow
// foreground.
func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) }

// BlueString is a convenient helper function to return a string with blue
// foreground.
func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) }

// MagentaString is a convenient helper function to return a string with magenta
// foreground.
func MagentaString(format string, a ...interface{}) string {
	return colorString(format, FgMagenta, a...)
}

// CyanString is a convenient helper function to return a string with cyan
|
||||||
|
// foreground.
|
||||||
|
func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) }
|
||||||
|
|
||||||
|
// WhiteString is a convenient helper function to return a string with white
|
||||||
|
// foreground.
|
||||||
|
func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) }
|
||||||
|
|
||||||
|
// HiBlack is a convenient helper function to print with hi-intensity black foreground. A
|
||||||
|
// newline is appended to format by default.
|
||||||
|
func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) }
|
||||||
|
|
||||||
|
// HiRed is a convenient helper function to print with hi-intensity red foreground. A
|
||||||
|
// newline is appended to format by default.
|
||||||
|
func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) }
|
||||||
|
|
||||||
|
// HiGreen is a convenient helper function to print with hi-intensity green foreground. A
|
||||||
|
// newline is appended to format by default.
|
||||||
|
func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) }
|
||||||
|
|
||||||
|
// HiYellow is a convenient helper function to print with hi-intensity yellow foreground.
|
||||||
|
// A newline is appended to format by default.
|
||||||
|
func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) }
|
||||||
|
|
||||||
|
// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A
|
||||||
|
// newline is appended to format by default.
|
||||||
|
func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) }
|
||||||
|
|
||||||
|
// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground.
|
||||||
|
// A newline is appended to format by default.
|
||||||
|
func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) }
|
||||||
|
|
||||||
|
// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A
|
||||||
|
// newline is appended to format by default.
|
||||||
|
func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) }
|
||||||
|
|
||||||
|
// HiWhite is a convenient helper function to print with hi-intensity white foreground. A
|
||||||
|
// newline is appended to format by default.
|
||||||
|
func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) }
|
||||||
|
|
||||||
|
// HiBlackString is a convenient helper function to return a string with hi-intensity black
|
||||||
|
// foreground.
|
||||||
|
func HiBlackString(format string, a ...interface{}) string {
|
||||||
|
return colorString(format, FgHiBlack, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HiRedString is a convenient helper function to return a string with hi-intensity red
|
||||||
|
// foreground.
|
||||||
|
func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) }
|
||||||
|
|
||||||
|
// HiGreenString is a convenient helper function to return a string with hi-intensity green
|
||||||
|
// foreground.
|
||||||
|
func HiGreenString(format string, a ...interface{}) string {
|
||||||
|
return colorString(format, FgHiGreen, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HiYellowString is a convenient helper function to return a string with hi-intensity yellow
|
||||||
|
// foreground.
|
||||||
|
func HiYellowString(format string, a ...interface{}) string {
|
||||||
|
return colorString(format, FgHiYellow, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HiBlueString is a convenient helper function to return a string with hi-intensity blue
|
||||||
|
// foreground.
|
||||||
|
func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) }
|
||||||
|
|
||||||
|
// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta
|
||||||
|
// foreground.
|
||||||
|
func HiMagentaString(format string, a ...interface{}) string {
|
||||||
|
return colorString(format, FgHiMagenta, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HiCyanString is a convenient helper function to return a string with hi-intensity cyan
|
||||||
|
// foreground.
|
||||||
|
func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) }
|
||||||
|
|
||||||
|
// HiWhiteString is a convenient helper function to return a string with hi-intensity white
|
||||||
|
// foreground.
|
||||||
|
func HiWhiteString(format string, a ...interface{}) string {
|
||||||
|
return colorString(format, FgHiWhite, a...)
|
||||||
|
}
|
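A minimal usage sketch (not part of the diff) showing how the print-style and string-style helpers above differ; it assumes only the vendored github.com/fatih/color package shown here:

```go
package main

import (
	"fmt"

	"github.com/fatih/color"
)

func main() {
	// colorPrint-backed helpers print directly and append a newline.
	color.Red("error: %s", "disk full")

	// colorString-backed helpers return a colorized string for later use.
	fmt.Println("status:", color.GreenString("ok"))

	// Equals compares attribute sets, not rendered output.
	fmt.Println(color.New(color.FgCyan).Equals(color.New(color.FgCyan))) // true
}
```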
133 vendor/github.com/fatih/color/doc.go generated vendored Normal file
@@ -0,0 +1,133 @@
/*
Package color is an ANSI color package to output colorized or SGR defined
output to the standard output. The API can be used in several ways; pick one
that suits you.

Use simple and default helper functions with predefined foreground colors:

	color.Cyan("Prints text in cyan.")

	// a newline will be appended automatically
	color.Blue("Prints %s in blue.", "text")

	// More default foreground colors..
	color.Red("We have red")
	color.Yellow("Yellow color too!")
	color.Magenta("And many others ..")

	// Hi-intensity colors
	color.HiGreen("Bright green color.")
	color.HiBlack("Bright black means gray..")
	color.HiWhite("Shiny white color!")

However, there are times when custom color mixes are required. Below are some
examples to create custom color objects and use the print functions of each
separate color object.

	// Create a new color object
	c := color.New(color.FgCyan).Add(color.Underline)
	c.Println("Prints cyan text with an underline.")

	// Or just add them to New()
	d := color.New(color.FgCyan, color.Bold)
	d.Printf("This prints bold cyan %s\n", "too!")


	// Mix up foreground and background colors, create new mixes!
	red := color.New(color.FgRed)

	boldRed := red.Add(color.Bold)
	boldRed.Println("This will print text in bold red.")

	whiteBackground := red.Add(color.BgWhite)
	whiteBackground.Println("Red text with white background.")

	// Use your own io.Writer output
	color.New(color.FgBlue).Fprintln(myWriter, "blue color!")

	blue := color.New(color.FgBlue)
	blue.Fprint(myWriter, "This will print text in blue.")

You can create PrintXxx functions to simplify even more:

	// Create a custom print function for convenience
	red := color.New(color.FgRed).PrintfFunc()
	red("warning")
	red("error: %s", err)

	// Mix up multiple attributes
	notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
	notice("don't forget this...")

You can also use FprintXxx functions to pass your own io.Writer:

	blue := color.New(FgBlue).FprintfFunc()
	blue(myWriter, "important notice: %s", stars)

	// Mix up with multiple attributes
	success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
	success(myWriter, "don't forget this...")


Or create SprintXxx functions to mix strings with other non-colorized strings:

	yellow := New(FgYellow).SprintFunc()
	red := New(FgRed).SprintFunc()

	fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))

	info := New(FgWhite, BgGreen).SprintFunc()
	fmt.Printf("this %s rocks!\n", info("package"))

Windows support is enabled by default. All Print functions work as intended.
However, for the color.SprintXXX functions only, the user should use
fmt.FprintXXX and set the output to color.Output:

	fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))

	info := New(FgWhite, BgGreen).SprintFunc()
	fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))

Using it with existing code is possible. Just use the Set() method to set the
standard output to the given parameters. That way a rewrite of existing
code is not required.

	// Use handy standard colors.
	color.Set(color.FgYellow)

	fmt.Println("Existing text will be now in yellow")
	fmt.Printf("This one %s\n", "too")

	color.Unset() // don't forget to unset

	// You can mix up parameters
	color.Set(color.FgMagenta, color.Bold)
	defer color.Unset() // use it in your function

	fmt.Println("All text will be now bold magenta.")

There might be a case where you want to disable color output (for example to
pipe the standard output of your app to somewhere else). `Color` has support to
disable colors both globally and for single color definitions. For example,
suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
the color output with:

	var flagNoColor = flag.Bool("no-color", false, "Disable color output")

	if *flagNoColor {
		color.NoColor = true // disables colorized output
	}

It also has support for single color definitions (local). You can
disable/enable color output on the fly:

	c := color.New(color.FgCyan)
	c.Println("Prints cyan text")

	c.DisableColor()
	c.Println("This is printed without any color")

	c.EnableColor()
	c.Println("This prints again cyan...")
*/
package color
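As a runnable restatement of the global and per-color switches the doc comment describes (a hedged sketch, assuming only the vendored color package):

```go
package main

import (
	"flag"

	"github.com/fatih/color"
)

func main() {
	// Global switch: a --no-color flag that disables all colorized output.
	noColor := flag.Bool("no-color", false, "Disable color output")
	flag.Parse()
	color.NoColor = *noColor

	// Local switch: a single Color value toggled on the fly.
	c := color.New(color.FgCyan)
	c.Println("cyan (unless --no-color)")
	c.DisableColor()
	c.Println("always plain")
}
```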
8 vendor/github.com/fatih/color/go.mod generated vendored Normal file
@@ -0,0 +1,8 @@
module github.com/fatih/color

go 1.13

require (
	github.com/mattn/go-colorable v0.1.4
	github.com/mattn/go-isatty v0.0.11
)
8 vendor/github.com/fatih/color/go.sum generated vendored Normal file
@@ -0,0 +1,8 @@
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
22 vendor/github.com/go-kit/kit/LICENSE generated vendored Normal file
@@ -0,0 +1,22 @@
The MIT License (MIT)

Copyright (c) 2015 Peter Bourgon

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
151 vendor/github.com/go-kit/kit/log/README.md generated vendored Normal file
@@ -0,0 +1,151 @@
# package log

`package log` provides a minimal interface for structured logging in services.
It may be wrapped to encode conventions, enforce type-safety, provide leveled
logging, and so on. It can be used for both typical application log events,
and log-structured data streams.

## Structured logging

Structured logging is, basically, conceding to the reality that logs are
_data_, and warrant some level of schematic rigor. Using a stricter,
key/value-oriented message format for our logs, containing contextual and
semantic information, makes it much easier to get insight into the
operational activity of the systems we build. Consequently, `package log` is
of the strong belief that "[the benefits of structured logging outweigh the
minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)".

Migrating from unstructured to structured logging is probably a lot easier
than you'd expect.

```go
// Unstructured
log.Printf("HTTP server listening on %s", addr)

// Structured
logger.Log("transport", "HTTP", "addr", addr, "msg", "listening")
```

## Usage

### Typical application logging

```go
w := log.NewSyncWriter(os.Stderr)
logger := log.NewLogfmtLogger(w)
logger.Log("question", "what is the meaning of life?", "answer", 42)

// Output:
// question="what is the meaning of life?" answer=42
```

### Contextual Loggers

```go
func main() {
	var logger log.Logger
	logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
	logger = log.With(logger, "instance_id", 123)

	logger.Log("msg", "starting")
	NewWorker(log.With(logger, "component", "worker")).Run()
	NewSlacker(log.With(logger, "component", "slacker")).Run()
}

// Output:
// instance_id=123 msg=starting
// instance_id=123 component=worker msg=running
// instance_id=123 component=slacker msg=running
```

### Interact with stdlib logger

Redirect stdlib logger to Go kit logger.

```go
import (
	"os"
	stdlog "log"
	kitlog "github.com/go-kit/kit/log"
)

func main() {
	logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout))
	stdlog.SetOutput(kitlog.NewStdlibAdapter(logger))
	stdlog.Print("I sure like pie")
}

// Output:
// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"}
```

Or, if, for legacy reasons, you need to pipe all of your logging through the
stdlib log package, you can redirect the Go kit logger to the stdlib logger.

```go
logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{})
logger.Log("legacy", true, "msg", "at least it's something")

// Output:
// 2016/01/01 12:34:56 legacy=true msg="at least it's something"
```

### Timestamps and callers

```go
var logger log.Logger
logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)

logger.Log("msg", "hello")

// Output:
// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello
```

## Levels

Log levels are supported via the [level package](https://godoc.org/github.com/go-kit/kit/log/level).

## Supported output formats

- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write))
- JSON

## Enhancements

`package log` is centered on the one-method Logger interface.

```go
type Logger interface {
	Log(keyvals ...interface{}) error
}
```

This interface, and its supporting code, is the product of much iteration
and evaluation. For more details on the evolution of the Logger interface,
see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1),
a talk by [Chris Hines](https://github.com/ChrisHines).
Also, please see
[#63](https://github.com/go-kit/kit/issues/63),
[#76](https://github.com/go-kit/kit/pull/76),
[#131](https://github.com/go-kit/kit/issues/131),
[#157](https://github.com/go-kit/kit/pull/157),
[#164](https://github.com/go-kit/kit/issues/164), and
[#252](https://github.com/go-kit/kit/pull/252)
to review historical conversations about package log and the Logger interface.

Value-add packages and suggestions,
like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/level),
are of course welcome. Good proposals should

- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/kit/log#With),
- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped contextual loggers, and
- Be friendly to packages that accept only an unadorned log.Logger.

## Benchmarks & comparisons

There are a few Go logging benchmarks and comparisons that include Go kit's package log.

- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log
- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log
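Tying the README's pieces together, here is a hedged sketch of a typical composition: a logfmt logger with timestamp/caller context, filtered so only info and above is emitted. It assumes only the vendored go-kit packages shown in this commit.

```go
package main

import (
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

func main() {
	var logger log.Logger
	logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
	logger = level.NewFilter(logger, level.AllowInfo())
	logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)

	level.Debug(logger).Log("msg", "squelched by AllowInfo")
	level.Info(logger).Log("msg", "this one is emitted")
}
```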
116 vendor/github.com/go-kit/kit/log/doc.go generated vendored Normal file
@@ -0,0 +1,116 @@
// Package log provides a structured logger.
//
// Structured logging produces logs easily consumed later by humans or
// machines. Humans might be interested in debugging errors, or tracing
// specific requests. Machines might be interested in counting interesting
// events, or aggregating information for off-line processing. In both cases,
// it is important that the log messages are structured and actionable.
// Package log is designed to encourage both of these best practices.
//
// Basic Usage
//
// The fundamental interface is Logger. Loggers create log events from
// key/value data. The Logger interface has a single method, Log, which
// accepts a sequence of alternating key/value pairs, which this package names
// keyvals.
//
//    type Logger interface {
//        Log(keyvals ...interface{}) error
//    }
//
// Here is an example of a function using a Logger to create log events.
//
//    func RunTask(task Task, logger log.Logger) string {
//        logger.Log("taskID", task.ID, "event", "starting task")
//        ...
//        logger.Log("taskID", task.ID, "event", "task complete")
//    }
//
// The keys in the above example are "taskID" and "event". The values are
// task.ID, "starting task", and "task complete". Every key is followed
// immediately by its value.
//
// Keys are usually plain strings. Values may be any type that has a sensible
// encoding in the chosen log format. With structured logging it is a good
// idea to log simple values without formatting them. This practice allows
// the chosen logger to encode values in the most appropriate way.
//
// Contextual Loggers
//
// A contextual logger stores keyvals that it includes in all log events.
// Building appropriate contextual loggers reduces repetition and aids
// consistency in the resulting log output. With and WithPrefix add context to
// a logger. We can use With to improve the RunTask example.
//
//    func RunTask(task Task, logger log.Logger) string {
//        logger = log.With(logger, "taskID", task.ID)
//        logger.Log("event", "starting task")
//        ...
//        taskHelper(task.Cmd, logger)
//        ...
//        logger.Log("event", "task complete")
//    }
//
// The improved version emits the same log events as the original for the
// first and last calls to Log. Passing the contextual logger to taskHelper
// enables each log event created by taskHelper to include the task.ID even
// though taskHelper does not have access to that value. Using contextual
// loggers this way simplifies producing log output that enables tracing the
// life cycle of individual tasks. (See the Contextual example for the full
// code of the above snippet.)
//
// Dynamic Contextual Values
//
// A Valuer function stored in a contextual logger generates a new value each
// time an event is logged. The Valuer example demonstrates how this feature
// works.
//
// Valuers provide the basis for consistently logging timestamps and source
// code location. The log package defines several valuers for that purpose.
// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and
// DefaultCaller. A common logger initialization sequence that ensures all log
// entries contain a timestamp and source location looks like this:
//
//    logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
//    logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
//
// Concurrent Safety
//
// Applications with multiple goroutines want each log event written to the
// same logger to remain separate from other log events. Package log provides
// two simple solutions for concurrent safe logging.
//
// NewSyncWriter wraps an io.Writer and serializes each call to its Write
// method. Using a SyncWriter has the benefit that the smallest practical
// portion of the logging logic is performed within a mutex, but it requires
// the formatting Logger to make only one call to Write per log event.
//
// NewSyncLogger wraps any Logger and serializes each call to its Log method.
// Using a SyncLogger has the benefit that it guarantees each log event is
// handled atomically within the wrapped logger, but it typically serializes
// both the formatting and output logic. Use a SyncLogger if the formatting
// logger may perform multiple writes per log event.
//
// Error Handling
//
// This package relies on the practice of wrapping or decorating loggers with
// other loggers to provide composable pieces of functionality. It also means
// that Logger.Log must return an error because some
// implementations—especially those that output log data to an io.Writer—may
// encounter errors that cannot be handled locally. This in turn means that
// Loggers that wrap other loggers should return errors from the wrapped
// logger up the stack.
//
// Fortunately, the decorator pattern also provides a way to avoid the
// necessity to check for errors every time an application calls Logger.Log.
// An application required to panic whenever its Logger encounters
// an error could initialize its logger as follows.
//
//    fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
//    logger := log.LoggerFunc(func(keyvals ...interface{}) error {
//        if err := fmtlogger.Log(keyvals...); err != nil {
//            panic(err)
//        }
//        return nil
//    })
package log
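A minimal sketch of the "Dynamic Contextual Values" idea described above: a custom Valuer is re-evaluated on every Log call. This assumes the vendored go-kit log package (Valuer is defined in its value.go, which is not part of this hunk).

```go
package main

import (
	"os"
	"sync/atomic"

	"github.com/go-kit/kit/log"
)

func main() {
	var seq uint64
	// A Valuer is just func() interface{}; it runs once per log event.
	count := log.Valuer(func() interface{} { return atomic.AddUint64(&seq, 1) })

	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
	logger = log.With(logger, "event_id", count)

	logger.Log("msg", "first")  // event_id=1 msg=first
	logger.Log("msg", "second") // event_id=2 msg=second
}
```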
91 vendor/github.com/go-kit/kit/log/json_logger.go generated vendored Normal file
@@ -0,0 +1,91 @@
package log

import (
	"encoding"
	"encoding/json"
	"fmt"
	"io"
	"reflect"
)

type jsonLogger struct {
	io.Writer
}

// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a
// single JSON object. Each log event produces no more than one call to
// w.Write. The passed Writer must be safe for concurrent use by multiple
// goroutines if the returned Logger will be used concurrently.
func NewJSONLogger(w io.Writer) Logger {
	return &jsonLogger{w}
}

func (l *jsonLogger) Log(keyvals ...interface{}) error {
	n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd
	m := make(map[string]interface{}, n)
	for i := 0; i < len(keyvals); i += 2 {
		k := keyvals[i]
		var v interface{} = ErrMissingValue
		if i+1 < len(keyvals) {
			v = keyvals[i+1]
		}
		merge(m, k, v)
	}
	enc := json.NewEncoder(l.Writer)
	enc.SetEscapeHTML(false)
	return enc.Encode(m)
}

func merge(dst map[string]interface{}, k, v interface{}) {
	var key string
	switch x := k.(type) {
	case string:
		key = x
	case fmt.Stringer:
		key = safeString(x)
	default:
		key = fmt.Sprint(x)
	}

	// We want json.Marshaler and encoding.TextMarshaler to take priority over
	// err.Error() and v.String(). But json.Marshal (called later) does that by
	// default, so we force a no-op if it's one of those two cases.
	switch x := v.(type) {
	case json.Marshaler:
	case encoding.TextMarshaler:
	case error:
		v = safeError(x)
	case fmt.Stringer:
		v = safeString(x)
	}

	dst[key] = v
}

func safeString(str fmt.Stringer) (s string) {
	defer func() {
		if panicVal := recover(); panicVal != nil {
			if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() {
				s = "NULL"
			} else {
				panic(panicVal)
			}
		}
	}()
	s = str.String()
	return
}

func safeError(err error) (s interface{}) {
	defer func() {
		if panicVal := recover(); panicVal != nil {
			if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
				s = nil
			} else {
				panic(panicVal)
			}
		}
	}()
	s = err.Error()
	return
}
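A brief, hedged usage sketch for the JSON logger above, showing how errors pass through safeError and how odd-length keyvals pick up ErrMissingValue:

```go
package main

import (
	"errors"
	"os"

	"github.com/go-kit/kit/log"
)

func main() {
	logger := log.NewJSONLogger(log.NewSyncWriter(os.Stdout))

	// Errors are rendered via safeError; a dangling key gets ErrMissingValue.
	logger.Log("msg", "open failed", "err", errors.New("no such file"))
	logger.Log("dangling-key")
	// Output (key order may vary):
	// {"err":"no such file","msg":"open failed"}
	// {"dangling-key":"(MISSING)"}
}
```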
22 vendor/github.com/go-kit/kit/log/level/doc.go generated vendored Normal file
@@ -0,0 +1,22 @@
// Package level implements leveled logging on top of Go kit's log package. To
// use the level package, create a logger as per normal in your func main, and
// wrap it with level.NewFilter.
//
//    var logger log.Logger
//    logger = log.NewLogfmtLogger(os.Stderr)
//    logger = level.NewFilter(logger, level.AllowInfo()) // <--
//    logger = log.With(logger, "ts", log.DefaultTimestampUTC)
//
// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error
// helper methods to emit leveled log events.
//
//    logger.Log("foo", "bar") // as normal, no level
//    level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get())
//    if value > 100 {
//        level.Error(logger).Log("value", value)
//    }
//
// NewFilter allows precise control over what happens when a log event is
// emitted without a level key, or if a squelched level is used. Check the
// Option functions for details.
package level
205 vendor/github.com/go-kit/kit/log/level/level.go generated vendored Normal file
@@ -0,0 +1,205 @@
package level

import "github.com/go-kit/kit/log"

// Error returns a logger that includes a Key/ErrorValue pair.
func Error(logger log.Logger) log.Logger {
	return log.WithPrefix(logger, Key(), ErrorValue())
}

// Warn returns a logger that includes a Key/WarnValue pair.
func Warn(logger log.Logger) log.Logger {
	return log.WithPrefix(logger, Key(), WarnValue())
}

// Info returns a logger that includes a Key/InfoValue pair.
func Info(logger log.Logger) log.Logger {
	return log.WithPrefix(logger, Key(), InfoValue())
}

// Debug returns a logger that includes a Key/DebugValue pair.
func Debug(logger log.Logger) log.Logger {
	return log.WithPrefix(logger, Key(), DebugValue())
}

// NewFilter wraps next and implements level filtering. See the commentary on
// the Option functions for a detailed description of how to configure levels.
// If no options are provided, all leveled log events created with the Debug,
// Info, Warn or Error helper methods are squelched and non-leveled log
// events are passed to next unmodified.
func NewFilter(next log.Logger, options ...Option) log.Logger {
	l := &logger{
		next: next,
	}
	for _, option := range options {
		option(l)
	}
	return l
}

type logger struct {
	next           log.Logger
	allowed        level
	squelchNoLevel bool
	errNotAllowed  error
	errNoLevel     error
}

func (l *logger) Log(keyvals ...interface{}) error {
	var hasLevel, levelAllowed bool
	for i := 1; i < len(keyvals); i += 2 {
		if v, ok := keyvals[i].(*levelValue); ok {
			hasLevel = true
			levelAllowed = l.allowed&v.level != 0
			break
		}
	}
	if !hasLevel && l.squelchNoLevel {
		return l.errNoLevel
	}
	if hasLevel && !levelAllowed {
		return l.errNotAllowed
	}
	return l.next.Log(keyvals...)
}

// Option sets a parameter for the leveled logger.
type Option func(*logger)

// AllowAll is an alias for AllowDebug.
func AllowAll() Option {
	return AllowDebug()
}

// AllowDebug allows error, warn, info and debug level log events to pass.
func AllowDebug() Option {
	return allowed(levelError | levelWarn | levelInfo | levelDebug)
}

// AllowInfo allows error, warn and info level log events to pass.
func AllowInfo() Option {
	return allowed(levelError | levelWarn | levelInfo)
}

// AllowWarn allows error and warn level log events to pass.
func AllowWarn() Option {
	return allowed(levelError | levelWarn)
}

// AllowError allows only error level log events to pass.
func AllowError() Option {
	return allowed(levelError)
}

// AllowNone allows no leveled log events to pass.
func AllowNone() Option {
	return allowed(0)
}

func allowed(allowed level) Option {
	return func(l *logger) { l.allowed = allowed }
}

// ErrNotAllowed sets the error to return from Log when it squelches a log
// event disallowed by the configured Allow[Level] option. By default,
// ErrNotAllowed is nil; in this case the log event is squelched with no
// error.
func ErrNotAllowed(err error) Option {
	return func(l *logger) { l.errNotAllowed = err }
}

// SquelchNoLevel instructs Log to squelch log events with no level, so that
// they don't proceed through to the wrapped logger. If SquelchNoLevel is set
// to true and a log event is squelched in this way, the error value
// configured with ErrNoLevel is returned to the caller.
func SquelchNoLevel(squelch bool) Option {
	return func(l *logger) { l.squelchNoLevel = squelch }
}

// ErrNoLevel sets the error to return from Log when it squelches a log event
// with no level. By default, ErrNoLevel is nil; in this case the log event is
// squelched with no error.
func ErrNoLevel(err error) Option {
	return func(l *logger) { l.errNoLevel = err }
}

// NewInjector wraps next and returns a logger that adds a Key/level pair to
// the beginning of log events that don't already contain a level. In effect,
// this gives a default level to logs without a level.
func NewInjector(next log.Logger, level Value) log.Logger {
	return &injector{
		next:  next,
		level: level,
	}
}

type injector struct {
	next  log.Logger
	level interface{}
}

func (l *injector) Log(keyvals ...interface{}) error {
	for i := 1; i < len(keyvals); i += 2 {
		if _, ok := keyvals[i].(*levelValue); ok {
			return l.next.Log(keyvals...)
		}
	}
	kvs := make([]interface{}, len(keyvals)+2)
	kvs[0], kvs[1] = key, l.level
	copy(kvs[2:], keyvals)
	return l.next.Log(kvs...)
}

// Value is the interface that each of the canonical level values implements.
// It contains unexported methods that prevent types from other packages from
// implementing it, guaranteeing that NewFilter can distinguish the levels
// defined in this package from all other values.
type Value interface {
	String() string
	levelVal()
}

// Key returns the unique key added to log events by the loggers in this
// package.
func Key() interface{} { return key }

// ErrorValue returns the unique value added to log events by Error.
func ErrorValue() Value { return errorValue }

// WarnValue returns the unique value added to log events by Warn.
func WarnValue() Value { return warnValue }

// InfoValue returns the unique value added to log events by Info.
func InfoValue() Value { return infoValue }

// DebugValue returns the unique value added to log events by Debug.
func DebugValue() Value { return debugValue }

var (
	// key is of type interface{} so that it allocates once during package
	// initialization and avoids allocating every time the value is added to a
	// []interface{} later.
	key interface{} = "level"

	errorValue = &levelValue{level: levelError, name: "error"}
	warnValue  = &levelValue{level: levelWarn, name: "warn"}
	infoValue  = &levelValue{level: levelInfo, name: "info"}
	debugValue = &levelValue{level: levelDebug, name: "debug"}
)

type level byte

const (
	levelDebug level = 1 << iota
	levelInfo
	levelWarn
	levelError
)

type levelValue struct {
	name string
	level
}

func (v *levelValue) String() string { return v.name }
func (v *levelValue) levelVal()      {}
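A hedged sketch of the filter and injector behavior defined above: unleveled events get a default level injected, then pass through an AllowWarn filter that returns a configured error for squelched events.

```go
package main

import (
	"errors"
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

func main() {
	base := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))

	logger := level.NewFilter(base, level.AllowWarn(),
		level.ErrNotAllowed(errors.New("level not allowed")))
	logger = level.NewInjector(logger, level.InfoValue())

	level.Error(logger).Log("msg", "emitted")          // passes AllowWarn
	err := level.Debug(logger).Log("msg", "squelched") // returns the configured error
	base.Log("squelch_err", err)

	logger.Log("msg", "unleveled") // injected as info, then squelched by AllowWarn
}
```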
135 vendor/github.com/go-kit/kit/log/log.go generated vendored Normal file
@@ -0,0 +1,135 @@
package log

import "errors"

// Logger is the fundamental interface for all log operations. Log creates a
// log event from keyvals, a variadic sequence of alternating keys and values.
// Implementations must be safe for concurrent use by multiple goroutines. In
// particular, any implementation of Logger that appends to keyvals or
// modifies or retains any of its elements must make a copy first.
type Logger interface {
	Log(keyvals ...interface{}) error
}

// ErrMissingValue is appended to keyvals slices with odd length to substitute
// the missing value.
var ErrMissingValue = errors.New("(MISSING)")

// With returns a new contextual logger with keyvals prepended to those passed
// to calls to Log. If logger is also a contextual logger created by With or
// WithPrefix, keyvals is appended to the existing context.
//
// The returned Logger replaces all value elements (odd indexes) containing a
// Valuer with their generated value for each call to its Log method.
func With(logger Logger, keyvals ...interface{}) Logger {
	if len(keyvals) == 0 {
		return logger
	}
	l := newContext(logger)
	kvs := append(l.keyvals, keyvals...)
	if len(kvs)%2 != 0 {
		kvs = append(kvs, ErrMissingValue)
	}
	return &context{
		logger: l.logger,
		// Limiting the capacity of the stored keyvals ensures that a new
		// backing array is created if the slice must grow in Log or With.
		// Using the extra capacity without copying risks a data race that
		// would violate the Logger interface contract.
		keyvals:   kvs[:len(kvs):len(kvs)],
		hasValuer: l.hasValuer || containsValuer(keyvals),
	}
}

// WithPrefix returns a new contextual logger with keyvals prepended to those
// passed to calls to Log. If logger is also a contextual logger created by
// With or WithPrefix, keyvals is prepended to the existing context.
//
// The returned Logger replaces all value elements (odd indexes) containing a
// Valuer with their generated value for each call to its Log method.
func WithPrefix(logger Logger, keyvals ...interface{}) Logger {
	if len(keyvals) == 0 {
		return logger
	}
	l := newContext(logger)
	// Limiting the capacity of the stored keyvals ensures that a new
	// backing array is created if the slice must grow in Log or With.
	// Using the extra capacity without copying risks a data race that
	// would violate the Logger interface contract.
	n := len(l.keyvals) + len(keyvals)
	if len(keyvals)%2 != 0 {
		n++
	}
	kvs := make([]interface{}, 0, n)
	kvs = append(kvs, keyvals...)
	if len(kvs)%2 != 0 {
		kvs = append(kvs, ErrMissingValue)
	}
	kvs = append(kvs, l.keyvals...)
	return &context{
		logger:    l.logger,
		keyvals:   kvs,
		hasValuer: l.hasValuer || containsValuer(keyvals),
	}
}

// context is the Logger implementation returned by With and WithPrefix. It
// wraps a Logger and holds keyvals that it includes in all log events. Its
// Log method calls bindValues to generate values for each Valuer in the
// context keyvals.
//
// A context must always have the same number of stack frames between calls to
// its Log method and the eventual binding of Valuers to their value. This
// requirement comes from the functional requirement to allow a context to
// resolve application call site information for a Caller stored in the
// context. To do this we must be able to predict the number of logging
// functions on the stack when bindValues is called.
//
// Two implementation details provide the needed stack depth consistency.
//
//    1. newContext avoids introducing an additional layer when asked to
//       wrap another context.
//    2. With and WithPrefix avoid introducing an additional layer by
//       returning a newly constructed context with a merged keyvals rather
//       than simply wrapping the existing context.
type context struct {
	logger    Logger
	keyvals   []interface{}
	hasValuer bool
}

func newContext(logger Logger) *context {
	if c, ok := logger.(*context); ok {
		return c
	}
	return &context{logger: logger}
}

// Log replaces all value elements (odd indexes) containing a Valuer in the
// stored context with their generated value, appends keyvals, and passes the
// result to the wrapped Logger.
func (l *context) Log(keyvals ...interface{}) error {
	kvs := append(l.keyvals, keyvals...)
	if len(kvs)%2 != 0 {
		kvs = append(kvs, ErrMissingValue)
	}
	if l.hasValuer {
		// If no keyvals were appended above then we must copy l.keyvals so
		// that future log events will reevaluate the stored Valuers.
		if len(keyvals) == 0 {
			kvs = append([]interface{}{}, l.keyvals...)
		}
		bindValues(kvs[:len(l.keyvals)])
	}
	return l.logger.Log(kvs...)
}

// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If
// f is a function with the appropriate signature, LoggerFunc(f) is a Logger
// object that calls f.
type LoggerFunc func(...interface{}) error

// Log implements Logger by calling f(keyvals...).
func (f LoggerFunc) Log(keyvals ...interface{}) error {
	return f(keyvals...)
}
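A small sketch of the LoggerFunc adapter defined above: any function with the right signature becomes a Logger, which is handy for tests or custom fan-out. The counting behavior here is illustrative, not part of the package.

```go
package main

import (
	"fmt"

	"github.com/go-kit/kit/log"
)

func main() {
	var events int
	counting := log.LoggerFunc(func(keyvals ...interface{}) error {
		events++
		fmt.Println(append([]interface{}{"n", events}, keyvals...)...)
		return nil
	})

	// The adapter composes with With like any other Logger.
	logger := log.With(counting, "component", "demo")
	logger.Log("msg", "hello") // n 1 component demo msg hello
}
```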
62 vendor/github.com/go-kit/kit/log/logfmt_logger.go generated vendored Normal file
@@ -0,0 +1,62 @@
package log

import (
	"bytes"
	"io"
	"sync"

	"github.com/go-logfmt/logfmt"
)

type logfmtEncoder struct {
	*logfmt.Encoder
	buf bytes.Buffer
}

func (l *logfmtEncoder) Reset() {
	l.Encoder.Reset()
	l.buf.Reset()
}

var logfmtEncoderPool = sync.Pool{
	New: func() interface{} {
		var enc logfmtEncoder
		enc.Encoder = logfmt.NewEncoder(&enc.buf)
		return &enc
	},
}

type logfmtLogger struct {
	w io.Writer
}

// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in
// logfmt format. Each log event produces no more than one call to w.Write.
// The passed Writer must be safe for concurrent use by multiple goroutines if
// the returned Logger will be used concurrently.
func NewLogfmtLogger(w io.Writer) Logger {
	return &logfmtLogger{w}
}

func (l logfmtLogger) Log(keyvals ...interface{}) error {
	enc := logfmtEncoderPool.Get().(*logfmtEncoder)
	enc.Reset()
	defer logfmtEncoderPool.Put(enc)

	if err := enc.EncodeKeyvals(keyvals...); err != nil {
		return err
	}

	// Add newline to the end of the buffer
	if err := enc.EndRecord(); err != nil {
		return err
	}

	// The Logger interface requires implementations to be safe for concurrent
	// use by multiple goroutines. For this implementation that means making
	// only one call to l.w.Write() for each call to Log.
	if _, err := l.w.Write(enc.buf.Bytes()); err != nil {
		return err
	}
	return nil
}
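The interesting design choice above is the sync.Pool of buffer+encoder pairs, which keeps per-event allocation low while still producing exactly one Write per record. Reduced to a hedged standalone sketch (the pooledBuf type and render function are illustrative, not part of the package):

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

type pooledBuf struct{ buf bytes.Buffer }

var bufPool = sync.Pool{
	New: func() interface{} { return new(pooledBuf) },
}

// render reuses a pooled buffer: Get, Reset, use, Put.
func render(keyvals ...interface{}) string {
	p := bufPool.Get().(*pooledBuf)
	p.buf.Reset()
	defer bufPool.Put(p)

	for i := 0; i+1 < len(keyvals); i += 2 {
		fmt.Fprintf(&p.buf, "%v=%v ", keyvals[i], keyvals[i+1])
	}
	return p.buf.String()
}

func main() {
	fmt.Println(render("msg", "hello", "answer", 42))
}
```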
8 vendor/github.com/go-kit/kit/log/nop_logger.go generated vendored Normal file
@@ -0,0 +1,8 @@
package log

type nopLogger struct{}

// NewNopLogger returns a logger that doesn't do anything.
func NewNopLogger() Logger { return nopLogger{} }

func (nopLogger) Log(...interface{}) error { return nil }
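A tiny hedged example of where the nop logger above earns its keep: satisfying a Logger dependency, typically in tests, without producing output (the Worker type is hypothetical):

```go
package main

import "github.com/go-kit/kit/log"

type Worker struct{ logger log.Logger }

func NewWorker(logger log.Logger) *Worker { return &Worker{logger: logger} }

func main() {
	w := NewWorker(log.NewNopLogger()) // all Log calls become no-ops
	w.logger.Log("msg", "silently discarded")
}
```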
116 vendor/github.com/go-kit/kit/log/stdlib.go generated vendored Normal file
@@ -0,0 +1,116 @@
package log

import (
	"io"
	"log"
	"regexp"
	"strings"
)

// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's
// designed to be passed to a Go kit logger as the writer, for cases where
// it's necessary to redirect all Go kit log output to the stdlib logger.
//
// If you have any choice in the matter, you shouldn't use this. Prefer to
// redirect the stdlib log to the Go kit logger via NewStdlibAdapter.
type StdlibWriter struct{}

// Write implements io.Writer.
func (w StdlibWriter) Write(p []byte) (int, error) {
	log.Print(strings.TrimSpace(string(p)))
	return len(p), nil
}

// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib
// logger's SetOutput. It will extract date/timestamps, filenames, and
// messages, and place them under relevant keys.
type StdlibAdapter struct {
	Logger
	timestampKey string
	fileKey      string
	messageKey   string
}

// StdlibAdapterOption sets a parameter for the StdlibAdapter.
type StdlibAdapterOption func(*StdlibAdapter)

// TimestampKey sets the key for the timestamp field. By default, it's "ts".
func TimestampKey(key string) StdlibAdapterOption {
	return func(a *StdlibAdapter) { a.timestampKey = key }
}

// FileKey sets the key for the file and line field. By default, it's "caller".
func FileKey(key string) StdlibAdapterOption {
	return func(a *StdlibAdapter) { a.fileKey = key }
}

// MessageKey sets the key for the actual log message. By default, it's "msg".
func MessageKey(key string) StdlibAdapterOption {
	return func(a *StdlibAdapter) { a.messageKey = key }
}

// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed
// logger. It's designed to be passed to log.SetOutput.
func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer {
	a := StdlibAdapter{
		Logger:       logger,
		timestampKey: "ts",
		fileKey:      "caller",
		messageKey:   "msg",
	}
	for _, option := range options {
		option(&a)
	}
	return a
}

func (a StdlibAdapter) Write(p []byte) (int, error) {
	result := subexps(p)
	keyvals := []interface{}{}
	var timestamp string
	if date, ok := result["date"]; ok && date != "" {
		timestamp = date
	}
	if time, ok := result["time"]; ok && time != "" {
		if timestamp != "" {
			timestamp += " "
		}
		timestamp += time
	}
	if timestamp != "" {
		keyvals = append(keyvals, a.timestampKey, timestamp)
	}
	if file, ok := result["file"]; ok && file != "" {
		keyvals = append(keyvals, a.fileKey, file)
	}
	if msg, ok := result["msg"]; ok {
		keyvals = append(keyvals, a.messageKey, msg)
	}
	if err := a.Logger.Log(keyvals...); err != nil {
		return 0, err
	}
	return len(p), nil
}

const (
	logRegexpDate = `(?P<date>[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?`
	logRegexpTime = `(?P<time>[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?)?[ ]?`
	logRegexpFile = `(?P<file>.+?:[0-9]+)?`
	logRegexpMsg  = `(: )?(?P<msg>.*)`
)

var (
	logRegexp = regexp.MustCompile(logRegexpDate + logRegexpTime + logRegexpFile + logRegexpMsg)
)

func subexps(line []byte) map[string]string {
	m := logRegexp.FindSubmatch(line)
	if len(m) < len(logRegexp.SubexpNames()) {
		return map[string]string{}
	}
	result := map[string]string{}
	for i, name := range logRegexp.SubexpNames() {
		result[name] = string(m[i])
	}
	return result
}
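A hedged sketch of the adapter above in use: stdlib log output is parsed by the regexps into keyvals and forwarded to a Go kit logger, here with a custom message key via the MessageKey option.

```go
package main

import (
	stdlog "log"
	"os"

	kitlog "github.com/go-kit/kit/log"
)

func main() {
	logger := kitlog.NewLogfmtLogger(kitlog.NewSyncWriter(os.Stdout))
	stdlog.SetOutput(kitlog.NewStdlibAdapter(logger, kitlog.MessageKey("message")))
	stdlog.SetFlags(stdlog.LstdFlags) // date and time are extracted into "ts"

	stdlog.Print("migrated without touching call sites")
	// Output (approx.):
	// ts="2024/01/01 12:34:56" message="migrated without touching call sites"
}
```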
116 vendor/github.com/go-kit/kit/log/sync.go generated vendored Normal file
@@ -0,0 +1,116 @@
package log

import (
	"io"
	"sync"
	"sync/atomic"
)

// SwapLogger wraps another logger that may be safely replaced while other
// goroutines use the SwapLogger concurrently. The zero value for a SwapLogger
// will discard all log events without error.
//
// SwapLogger serves well as a package global logger that can be changed by
// importers.
type SwapLogger struct {
	logger atomic.Value
}

type loggerStruct struct {
	Logger
}

// Log implements the Logger interface by forwarding keyvals to the currently
// wrapped logger. It does not log anything if the wrapped logger is nil.
func (l *SwapLogger) Log(keyvals ...interface{}) error {
	s, ok := l.logger.Load().(loggerStruct)
	if !ok || s.Logger == nil {
		return nil
	}
	return s.Log(keyvals...)
}

// Swap replaces the currently wrapped logger with logger. Swap may be called
// concurrently with calls to Log from other goroutines.
func (l *SwapLogger) Swap(logger Logger) {
	l.logger.Store(loggerStruct{logger})
}

// NewSyncWriter returns a new writer that is safe for concurrent use by
// multiple goroutines. Writes to the returned writer are passed on to w. If
// another write is already in progress, the calling goroutine blocks until
// the writer is available.
//
// If w implements the following interface, so does the returned writer.
//
//    interface {
//        Fd() uintptr
//    }
func NewSyncWriter(w io.Writer) io.Writer {
	switch w := w.(type) {
	case fdWriter:
		return &fdSyncWriter{fdWriter: w}
	default:
		return &syncWriter{Writer: w}
	}
}

// syncWriter synchronizes concurrent writes to an io.Writer.
type syncWriter struct {
	sync.Mutex
	io.Writer
}

// Write writes p to the underlying io.Writer. If another write is already in
// progress, the calling goroutine blocks until the syncWriter is available.
func (w *syncWriter) Write(p []byte) (n int, err error) {
	w.Lock()
	n, err = w.Writer.Write(p)
	w.Unlock()
	return n, err
}

// fdWriter is an io.Writer that also has an Fd method. The most common
// example of an fdWriter is an *os.File.
type fdWriter interface {
	io.Writer
	Fd() uintptr
}

// fdSyncWriter synchronizes concurrent writes to an fdWriter.
type fdSyncWriter struct {
	sync.Mutex
	fdWriter
}

// Write writes p to the underlying io.Writer. If another write is already in
// progress, the calling goroutine blocks until the fdSyncWriter is available.
func (w *fdSyncWriter) Write(p []byte) (n int, err error) {
	w.Lock()
	n, err = w.fdWriter.Write(p)
	w.Unlock()
	return n, err
}

// syncLogger provides concurrent safe logging for another Logger.
type syncLogger struct {
	mu     sync.Mutex
	logger Logger
}

// NewSyncLogger returns a logger that synchronizes concurrent use of the
// wrapped logger. When multiple goroutines use the SyncLogger concurrently
// only one goroutine will be allowed to log to the wrapped logger at a time.
|
||||||
|
// The other goroutines will block until the logger is available.
|
||||||
|
func NewSyncLogger(logger Logger) Logger {
|
||||||
|
return &syncLogger{logger: logger}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Log logs keyvals to the underlying Logger. If another log is already in
|
||||||
|
// progress, the calling goroutine blocks until the syncLogger is available.
|
||||||
|
func (l *syncLogger) Log(keyvals ...interface{}) error {
|
||||||
|
l.mu.Lock()
|
||||||
|
err := l.logger.Log(keyvals...)
|
||||||
|
l.mu.Unlock()
|
||||||
|
return err
|
||||||
|
}
|
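Editor's note: a short sketch of the two concurrency helpers above, not part of this diff. It assumes the vendored go-kit log package; the messages are illustrative.

package main

import (
	"os"

	kitlog "github.com/go-kit/kit/log"
)

func main() {
	// A package-global logger: the zero-value SwapLogger discards
	// everything until a real logger is installed, e.g. after config load.
	var swap kitlog.SwapLogger
	swap.Log("msg", "dropped: no logger installed yet")

	// Wrap the real logger so concurrent goroutines serialize their writes.
	logger := kitlog.NewSyncLogger(kitlog.NewLogfmtLogger(os.Stderr))
	swap.Swap(logger)
	swap.Log("msg", "now visible")
}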
110
vendor/github.com/go-kit/kit/log/value.go
generated
vendored
Normal file
@@ -0,0 +1,110 @@
package log

import (
	"runtime"
	"strconv"
	"strings"
	"time"
)

// A Valuer generates a log value. When passed to With or WithPrefix in a
// value element (odd indexes), it represents a dynamic value which is
// re-evaluated with each log event.
type Valuer func() interface{}

// bindValues replaces all value elements (odd indexes) containing a Valuer
// with their generated value.
func bindValues(keyvals []interface{}) {
	for i := 1; i < len(keyvals); i += 2 {
		if v, ok := keyvals[i].(Valuer); ok {
			keyvals[i] = v()
		}
	}
}

// containsValuer returns true if any of the value elements (odd indexes)
// contain a Valuer.
func containsValuer(keyvals []interface{}) bool {
	for i := 1; i < len(keyvals); i += 2 {
		if _, ok := keyvals[i].(Valuer); ok {
			return true
		}
	}
	return false
}

// Timestamp returns a timestamp Valuer. It invokes the t function to get the
// time; unless you are doing something tricky, pass time.Now.
//
// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which
// are TimestampFormats that use the RFC3339Nano format.
func Timestamp(t func() time.Time) Valuer {
	return func() interface{} { return t() }
}

// TimestampFormat returns a timestamp Valuer with a custom time format. It
// invokes the t function to get the time to format; unless you are doing
// something tricky, pass time.Now. The layout string is passed to
// Time.Format.
//
// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which
// are TimestampFormats that use the RFC3339Nano format.
func TimestampFormat(t func() time.Time, layout string) Valuer {
	return func() interface{} {
		return timeFormat{
			time:   t(),
			layout: layout,
		}
	}
}

// A timeFormat represents an instant in time and a layout used when
// marshaling to a text format.
type timeFormat struct {
	time   time.Time
	layout string
}

func (tf timeFormat) String() string {
	return tf.time.Format(tf.layout)
}

// MarshalText implements encoding.TextMarshaler.
func (tf timeFormat) MarshalText() (text []byte, err error) {
	// The following code adapted from the standard library time.Time.Format
	// method. Using the same undocumented magic constant to extend the size
	// of the buffer as seen there.
	b := make([]byte, 0, len(tf.layout)+10)
	b = tf.time.AppendFormat(b, tf.layout)
	return b, nil
}

// Caller returns a Valuer that returns a file and line from a specified depth
// in the callstack. Users will probably want to use DefaultCaller.
func Caller(depth int) Valuer {
	return func() interface{} {
		_, file, line, _ := runtime.Caller(depth)
		idx := strings.LastIndexByte(file, '/')
		// using idx+1 below handles both of the following cases:
		// idx == -1 because no "/" was found, or
		// idx >= 0 and we want to start at the character after the found "/".
		return file[idx+1:] + ":" + strconv.Itoa(line)
	}
}

var (
	// DefaultTimestamp is a Valuer that returns the current wallclock time,
	// respecting time zones, when bound.
	DefaultTimestamp = TimestampFormat(time.Now, time.RFC3339Nano)

	// DefaultTimestampUTC is a Valuer that returns the current time in UTC
	// when bound.
	DefaultTimestampUTC = TimestampFormat(
		func() time.Time { return time.Now().UTC() },
		time.RFC3339Nano,
	)

	// DefaultCaller is a Valuer that returns the file and line where the Log
	// method was invoked. It can only be used with log.With.
	DefaultCaller = Caller(3)
)
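Editor's note: a minimal sketch showing how the Valuers above are bound with With, not part of this diff. It assumes the vendored go-kit log package; message text is illustrative.

package main

import (
	"os"

	kitlog "github.com/go-kit/kit/log"
)

func main() {
	logger := kitlog.NewLogfmtLogger(os.Stderr)
	// Valuers are re-evaluated on every Log call, so each record gets
	// a fresh timestamp and the caller's file:line.
	logger = kitlog.With(logger, "ts", kitlog.DefaultTimestampUTC, "caller", kitlog.DefaultCaller)
	logger.Log("msg", "event one")
	logger.Log("msg", "event two")
}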
1
vendor/github.com/go-logfmt/logfmt/.gitignore
generated
vendored
Normal file
@@ -0,0 +1 @@
.vscode/
18
vendor/github.com/go-logfmt/logfmt/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,18 @@
language: go
sudo: false
go:
  - "1.7.x"
  - "1.8.x"
  - "1.9.x"
  - "1.10.x"
  - "1.11.x"
  - "1.12.x"
  - "1.13.x"
  - "tip"

before_install:
  - go get github.com/mattn/goveralls
  - go get golang.org/x/tools/cmd/cover

script:
  - goveralls -service=travis-ci
48
vendor/github.com/go-logfmt/logfmt/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,48 @@
# Changelog
All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [0.5.0] - 2020-01-03

### Changed
- Remove the dependency on github.com/kr/logfmt by [@ChrisHines]
- Move fuzz code to github.com/go-logfmt/fuzzlogfmt by [@ChrisHines]

## [0.4.0] - 2018-11-21

### Added
- Go module support by [@ChrisHines]
- CHANGELOG by [@ChrisHines]

### Changed
- Drop invalid runes from keys instead of returning ErrInvalidKey by [@ChrisHines]
- On panic while printing, attempt to print panic value by [@bboreham]

## [0.3.0] - 2016-11-15
### Added
- Pool buffers for quoted strings and byte slices by [@nussjustin]
### Fixed
- Fuzz fix, quote invalid UTF-8 values by [@judwhite]

## [0.2.0] - 2016-05-08
### Added
- Encoder.EncodeKeyvals by [@ChrisHines]

## [0.1.0] - 2016-03-28
### Added
- Encoder by [@ChrisHines]
- Decoder by [@ChrisHines]
- MarshalKeyvals by [@ChrisHines]

[0.5.0]: https://github.com/go-logfmt/logfmt/compare/v0.4.0...v0.5.0
[0.4.0]: https://github.com/go-logfmt/logfmt/compare/v0.3.0...v0.4.0
[0.3.0]: https://github.com/go-logfmt/logfmt/compare/v0.2.0...v0.3.0
[0.2.0]: https://github.com/go-logfmt/logfmt/compare/v0.1.0...v0.2.0
[0.1.0]: https://github.com/go-logfmt/logfmt/commits/v0.1.0

[@ChrisHines]: https://github.com/ChrisHines
[@bboreham]: https://github.com/bboreham
[@judwhite]: https://github.com/judwhite
[@nussjustin]: https://github.com/nussjustin
22
vendor/github.com/go-logfmt/logfmt/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,22 @@
The MIT License (MIT)

Copyright (c) 2015 go-logfmt

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
33
vendor/github.com/go-logfmt/logfmt/README.md
generated
vendored
Normal file
@@ -0,0 +1,33 @@
[![GoDoc](https://godoc.org/github.com/go-logfmt/logfmt?status.svg)](https://godoc.org/github.com/go-logfmt/logfmt)
[![Go Report Card](https://goreportcard.com/badge/go-logfmt/logfmt)](https://goreportcard.com/report/go-logfmt/logfmt)
[![TravisCI](https://travis-ci.org/go-logfmt/logfmt.svg?branch=master)](https://travis-ci.org/go-logfmt/logfmt)
[![Coverage Status](https://coveralls.io/repos/github/go-logfmt/logfmt/badge.svg?branch=master)](https://coveralls.io/github/go-logfmt/logfmt?branch=master)

# logfmt

Package logfmt implements utilities to marshal and unmarshal data in the [logfmt
format](https://brandur.org/logfmt). It provides an API similar to
[encoding/json](http://golang.org/pkg/encoding/json/) and
[encoding/xml](http://golang.org/pkg/encoding/xml/).

The logfmt format was first documented by Brandur Leach in [this
article](https://brandur.org/logfmt). The format has not been formally
standardized. The most authoritative public specification to date has been the
documentation of a Go Language [package](http://godoc.org/github.com/kr/logfmt)
written by Blake Mizerany and Keith Rarick.

## Goals

This project attempts to conform as closely as possible to the prior art, while
also removing ambiguity where necessary to provide well behaved encoder and
decoder implementations.

## Non-goals

This project does not attempt to formally standardize the logfmt format. In the
event that logfmt is standardized this project would take conforming to the
standard as a goal.

## Versioning

Package logfmt publishes releases via [semver](http://semver.org/) compatible Git tags prefixed with a single 'v'.
237
vendor/github.com/go-logfmt/logfmt/decode.go
generated
vendored
Normal file
@@ -0,0 +1,237 @@
package logfmt

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"unicode/utf8"
)

// A Decoder reads and decodes logfmt records from an input stream.
type Decoder struct {
	pos     int
	key     []byte
	value   []byte
	lineNum int
	s       *bufio.Scanner
	err     error
}

// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may read data from r beyond
// the logfmt records requested.
func NewDecoder(r io.Reader) *Decoder {
	dec := &Decoder{
		s: bufio.NewScanner(r),
	}
	return dec
}

// ScanRecord advances the Decoder to the next record, which can then be
// parsed with the ScanKeyval method. It returns false when decoding stops,
// either by reaching the end of the input or an error. After ScanRecord
// returns false, the Err method will return any error that occurred during
// decoding, except that if it was io.EOF, Err will return nil.
func (dec *Decoder) ScanRecord() bool {
	if dec.err != nil {
		return false
	}
	if !dec.s.Scan() {
		dec.err = dec.s.Err()
		return false
	}
	dec.lineNum++
	dec.pos = 0
	return true
}

// ScanKeyval advances the Decoder to the next key/value pair of the current
// record, which can then be retrieved with the Key and Value methods. It
// returns false when decoding stops, either by reaching the end of the
// current record or an error.
func (dec *Decoder) ScanKeyval() bool {
	dec.key, dec.value = nil, nil
	if dec.err != nil {
		return false
	}

	line := dec.s.Bytes()

	// garbage
	for p, c := range line[dec.pos:] {
		if c > ' ' {
			dec.pos += p
			goto key
		}
	}
	dec.pos = len(line)
	return false

key:
	const invalidKeyError = "invalid key"

	start, multibyte := dec.pos, false
	for p, c := range line[dec.pos:] {
		switch {
		case c == '=':
			dec.pos += p
			if dec.pos > start {
				dec.key = line[start:dec.pos]
				if multibyte && bytes.ContainsRune(dec.key, utf8.RuneError) {
					dec.syntaxError(invalidKeyError)
					return false
				}
			}
			if dec.key == nil {
				dec.unexpectedByte(c)
				return false
			}
			goto equal
		case c == '"':
			dec.pos += p
			dec.unexpectedByte(c)
			return false
		case c <= ' ':
			dec.pos += p
			if dec.pos > start {
				dec.key = line[start:dec.pos]
				if multibyte && bytes.ContainsRune(dec.key, utf8.RuneError) {
					dec.syntaxError(invalidKeyError)
					return false
				}
			}
			return true
		case c >= utf8.RuneSelf:
			multibyte = true
		}
	}
	dec.pos = len(line)
	if dec.pos > start {
		dec.key = line[start:dec.pos]
		if multibyte && bytes.ContainsRune(dec.key, utf8.RuneError) {
			dec.syntaxError(invalidKeyError)
			return false
		}
	}
	return true

equal:
	dec.pos++
	if dec.pos >= len(line) {
		return true
	}
	switch c := line[dec.pos]; {
	case c <= ' ':
		return true
	case c == '"':
		goto qvalue
	}

	// value
	start = dec.pos
	for p, c := range line[dec.pos:] {
		switch {
		case c == '=' || c == '"':
			dec.pos += p
			dec.unexpectedByte(c)
			return false
		case c <= ' ':
			dec.pos += p
			if dec.pos > start {
				dec.value = line[start:dec.pos]
			}
			return true
		}
	}
	dec.pos = len(line)
	if dec.pos > start {
		dec.value = line[start:dec.pos]
	}
	return true

qvalue:
	const (
		untermQuote  = "unterminated quoted value"
		invalidQuote = "invalid quoted value"
	)

	hasEsc, esc := false, false
	start = dec.pos
	for p, c := range line[dec.pos+1:] {
		switch {
		case esc:
			esc = false
		case c == '\\':
			hasEsc, esc = true, true
		case c == '"':
			dec.pos += p + 2
			if hasEsc {
				v, ok := unquoteBytes(line[start:dec.pos])
				if !ok {
					dec.syntaxError(invalidQuote)
					return false
				}
				dec.value = v
			} else {
				start++
				end := dec.pos - 1
				if end > start {
					dec.value = line[start:end]
				}
			}
			return true
		}
	}
	dec.pos = len(line)
	dec.syntaxError(untermQuote)
	return false
}

// Key returns the most recent key found by a call to ScanKeyval. The returned
// slice may point to internal buffers and is only valid until the next call
// to ScanRecord. It does no allocation.
func (dec *Decoder) Key() []byte {
	return dec.key
}

// Value returns the most recent value found by a call to ScanKeyval. The
// returned slice may point to internal buffers and is only valid until the
// next call to ScanRecord. It does no allocation when the value has no
// escape sequences.
func (dec *Decoder) Value() []byte {
	return dec.value
}

// Err returns the first non-EOF error that was encountered by the Scanner.
func (dec *Decoder) Err() error {
	return dec.err
}

func (dec *Decoder) syntaxError(msg string) {
	dec.err = &SyntaxError{
		Msg:  msg,
		Line: dec.lineNum,
		Pos:  dec.pos + 1,
	}
}

func (dec *Decoder) unexpectedByte(c byte) {
	dec.err = &SyntaxError{
		Msg:  fmt.Sprintf("unexpected %q", c),
		Line: dec.lineNum,
		Pos:  dec.pos + 1,
	}
}

// A SyntaxError represents a syntax error in the logfmt input stream.
type SyntaxError struct {
	Msg  string
	Line int
	Pos  int
}

func (e *SyntaxError) Error() string {
	return fmt.Sprintf("logfmt syntax error at pos %d on line %d: %s", e.Pos, e.Line, e.Msg)
}
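Editor's note: a minimal decoding sketch for the Decoder API above, not part of this diff. It assumes the vendored go-logfmt/logfmt package; the input string is a made-up sample.

package main

import (
	"fmt"
	"strings"

	"github.com/go-logfmt/logfmt"
)

func main() {
	in := "ts=2020-01-01T00:00:00Z level=info msg=\"hello world\"\nlevel=warn msg=disk"
	dec := logfmt.NewDecoder(strings.NewReader(in))
	for dec.ScanRecord() {
		for dec.ScanKeyval() {
			// Key/Value slices point at internal buffers and are only
			// valid until the next ScanRecord call.
			fmt.Printf("%s=%q ", dec.Key(), dec.Value())
		}
		fmt.Println()
	}
	if err := dec.Err(); err != nil {
		fmt.Println("decode error:", err)
	}
}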
6
vendor/github.com/go-logfmt/logfmt/doc.go
generated
vendored
Normal file
@@ -0,0 +1,6 @@
// Package logfmt implements utilities to marshal and unmarshal data in the
// logfmt format. The logfmt format records key/value pairs in a way that
// balances readability for humans and simplicity of computer parsing. It is
// most commonly used as a more human friendly alternative to JSON for
// structured logging.
package logfmt
322
vendor/github.com/go-logfmt/logfmt/encode.go
generated
vendored
Normal file
@@ -0,0 +1,322 @@
package logfmt

import (
	"bytes"
	"encoding"
	"errors"
	"fmt"
	"io"
	"reflect"
	"strings"
	"unicode/utf8"
)

// MarshalKeyvals returns the logfmt encoding of keyvals, a variadic sequence
// of alternating keys and values.
func MarshalKeyvals(keyvals ...interface{}) ([]byte, error) {
	buf := &bytes.Buffer{}
	if err := NewEncoder(buf).EncodeKeyvals(keyvals...); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// An Encoder writes logfmt data to an output stream.
type Encoder struct {
	w       io.Writer
	scratch bytes.Buffer
	needSep bool
}

// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
	return &Encoder{
		w: w,
	}
}

var (
	space   = []byte(" ")
	equals  = []byte("=")
	newline = []byte("\n")
	null    = []byte("null")
)

// EncodeKeyval writes the logfmt encoding of key and value to the stream. A
// single space is written before the second and subsequent keys in a record.
// Nothing is written if a non-nil error is returned.
func (enc *Encoder) EncodeKeyval(key, value interface{}) error {
	enc.scratch.Reset()
	if enc.needSep {
		if _, err := enc.scratch.Write(space); err != nil {
			return err
		}
	}
	if err := writeKey(&enc.scratch, key); err != nil {
		return err
	}
	if _, err := enc.scratch.Write(equals); err != nil {
		return err
	}
	if err := writeValue(&enc.scratch, value); err != nil {
		return err
	}
	_, err := enc.w.Write(enc.scratch.Bytes())
	enc.needSep = true
	return err
}

// EncodeKeyvals writes the logfmt encoding of keyvals to the stream. Keyvals
// is a variadic sequence of alternating keys and values. Keys of unsupported
// type are skipped along with their corresponding value. Values of
// unsupported type or that cause a MarshalerError are replaced by their error
// but do not cause EncodeKeyvals to return an error. If a non-nil error is
// returned some key/value pairs may not have been written.
func (enc *Encoder) EncodeKeyvals(keyvals ...interface{}) error {
	if len(keyvals) == 0 {
		return nil
	}
	if len(keyvals)%2 == 1 {
		keyvals = append(keyvals, nil)
	}
	for i := 0; i < len(keyvals); i += 2 {
		k, v := keyvals[i], keyvals[i+1]
		err := enc.EncodeKeyval(k, v)
		if err == ErrUnsupportedKeyType {
			continue
		}
		if _, ok := err.(*MarshalerError); ok || err == ErrUnsupportedValueType {
			v = err
			err = enc.EncodeKeyval(k, v)
		}
		if err != nil {
			return err
		}
	}
	return nil
}

// MarshalerError represents an error encountered while marshaling a value.
type MarshalerError struct {
	Type reflect.Type
	Err  error
}

func (e *MarshalerError) Error() string {
	return "error marshaling value of type " + e.Type.String() + ": " + e.Err.Error()
}

// ErrNilKey is returned by Marshal functions and Encoder methods if a key is
// a nil interface or pointer value.
var ErrNilKey = errors.New("nil key")

// ErrInvalidKey is returned by Marshal functions and Encoder methods if, after
// dropping invalid runes, a key is empty.
var ErrInvalidKey = errors.New("invalid key")

// ErrUnsupportedKeyType is returned by Encoder methods if a key has an
// unsupported type.
var ErrUnsupportedKeyType = errors.New("unsupported key type")

// ErrUnsupportedValueType is returned by Encoder methods if a value has an
// unsupported type.
var ErrUnsupportedValueType = errors.New("unsupported value type")

func writeKey(w io.Writer, key interface{}) error {
	if key == nil {
		return ErrNilKey
	}

	switch k := key.(type) {
	case string:
		return writeStringKey(w, k)
	case []byte:
		if k == nil {
			return ErrNilKey
		}
		return writeBytesKey(w, k)
	case encoding.TextMarshaler:
		kb, err := safeMarshal(k)
		if err != nil {
			return err
		}
		if kb == nil {
			return ErrNilKey
		}
		return writeBytesKey(w, kb)
	case fmt.Stringer:
		ks, ok := safeString(k)
		if !ok {
			return ErrNilKey
		}
		return writeStringKey(w, ks)
	default:
		rkey := reflect.ValueOf(key)
		switch rkey.Kind() {
		case reflect.Array, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Struct:
			return ErrUnsupportedKeyType
		case reflect.Ptr:
			if rkey.IsNil() {
				return ErrNilKey
			}
			return writeKey(w, rkey.Elem().Interface())
		}
		return writeStringKey(w, fmt.Sprint(k))
	}
}

// keyRuneFilter returns r for all valid key runes, and -1 for all invalid key
// runes. When used as the mapping function for strings.Map and bytes.Map
// functions it causes them to remove invalid key runes from strings or byte
// slices respectively.
func keyRuneFilter(r rune) rune {
	if r <= ' ' || r == '=' || r == '"' || r == utf8.RuneError {
		return -1
	}
	return r
}

func writeStringKey(w io.Writer, key string) error {
	k := strings.Map(keyRuneFilter, key)
	if k == "" {
		return ErrInvalidKey
	}
	_, err := io.WriteString(w, k)
	return err
}

func writeBytesKey(w io.Writer, key []byte) error {
	k := bytes.Map(keyRuneFilter, key)
	if len(k) == 0 {
		return ErrInvalidKey
	}
	_, err := w.Write(k)
	return err
}

func writeValue(w io.Writer, value interface{}) error {
	switch v := value.(type) {
	case nil:
		return writeBytesValue(w, null)
	case string:
		return writeStringValue(w, v, true)
	case []byte:
		return writeBytesValue(w, v)
	case encoding.TextMarshaler:
		vb, err := safeMarshal(v)
		if err != nil {
			return err
		}
		if vb == nil {
			vb = null
		}
		return writeBytesValue(w, vb)
	case error:
		se, ok := safeError(v)
		return writeStringValue(w, se, ok)
	case fmt.Stringer:
		ss, ok := safeString(v)
		return writeStringValue(w, ss, ok)
	default:
		rvalue := reflect.ValueOf(value)
		switch rvalue.Kind() {
		case reflect.Array, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Struct:
			return ErrUnsupportedValueType
		case reflect.Ptr:
			if rvalue.IsNil() {
				return writeBytesValue(w, null)
			}
			return writeValue(w, rvalue.Elem().Interface())
		}
		return writeStringValue(w, fmt.Sprint(v), true)
	}
}

func needsQuotedValueRune(r rune) bool {
	return r <= ' ' || r == '=' || r == '"' || r == utf8.RuneError
}

func writeStringValue(w io.Writer, value string, ok bool) error {
	var err error
	if ok && value == "null" {
		_, err = io.WriteString(w, `"null"`)
	} else if strings.IndexFunc(value, needsQuotedValueRune) != -1 {
		_, err = writeQuotedString(w, value)
	} else {
		_, err = io.WriteString(w, value)
	}
	return err
}

func writeBytesValue(w io.Writer, value []byte) error {
	var err error
	if bytes.IndexFunc(value, needsQuotedValueRune) != -1 {
		_, err = writeQuotedBytes(w, value)
	} else {
		_, err = w.Write(value)
	}
	return err
}

// EndRecord writes a newline character to the stream and resets the encoder
// to the beginning of a new record.
func (enc *Encoder) EndRecord() error {
	_, err := enc.w.Write(newline)
	if err == nil {
		enc.needSep = false
	}
	return err
}

// Reset resets the encoder to the beginning of a new record.
func (enc *Encoder) Reset() {
	enc.needSep = false
}

func safeError(err error) (s string, ok bool) {
	defer func() {
		if panicVal := recover(); panicVal != nil {
			if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
				s, ok = "null", false
			} else {
				s, ok = fmt.Sprintf("PANIC:%v", panicVal), false
			}
		}
	}()
	s, ok = err.Error(), true
	return
}

func safeString(str fmt.Stringer) (s string, ok bool) {
	defer func() {
		if panicVal := recover(); panicVal != nil {
			if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() {
				s, ok = "null", false
			} else {
				s, ok = fmt.Sprintf("PANIC:%v", panicVal), true
			}
		}
	}()
	s, ok = str.String(), true
	return
}

func safeMarshal(tm encoding.TextMarshaler) (b []byte, err error) {
	defer func() {
		if panicVal := recover(); panicVal != nil {
			if v := reflect.ValueOf(tm); v.Kind() == reflect.Ptr && v.IsNil() {
				b, err = nil, nil
			} else {
				b, err = nil, fmt.Errorf("panic when marshalling: %s", panicVal)
			}
		}
	}()
	b, err = tm.MarshalText()
	if err != nil {
		return nil, &MarshalerError{
			Type: reflect.TypeOf(tm),
			Err:  err,
		}
	}
	return
}
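Editor's note: a minimal encoding sketch for the Encoder API above, not part of this diff. It assumes the vendored go-logfmt/logfmt package; keys and values are made-up samples.

package main

import (
	"fmt"
	"os"

	"github.com/go-logfmt/logfmt"
)

func main() {
	// One-shot helper: returns a single encoded record as a byte slice.
	b, err := logfmt.MarshalKeyvals("level", "info", "msg", "hello world")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", b) // level=info msg="hello world"

	// Streaming form: one Encoder, one record per EndRecord call.
	enc := logfmt.NewEncoder(os.Stdout)
	enc.EncodeKeyval("level", "warn")
	enc.EncodeKeyval("free_bytes", 1024)
	enc.EndRecord()
}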
3
vendor/github.com/go-logfmt/logfmt/go.mod
generated
vendored
Normal file
@@ -0,0 +1,3 @@
module github.com/go-logfmt/logfmt

go 1.13
277
vendor/github.com/go-logfmt/logfmt/jsonstring.go
generated
vendored
Normal file
@@ -0,0 +1,277 @@
package logfmt

import (
	"bytes"
	"io"
	"strconv"
	"sync"
	"unicode"
	"unicode/utf16"
	"unicode/utf8"
)

// Taken from Go's encoding/json and modified for use here.

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

var hex = "0123456789abcdef"

var bufferPool = sync.Pool{
	New: func() interface{} {
		return &bytes.Buffer{}
	},
}

func getBuffer() *bytes.Buffer {
	return bufferPool.Get().(*bytes.Buffer)
}

func poolBuffer(buf *bytes.Buffer) {
	buf.Reset()
	bufferPool.Put(buf)
}

// NOTE: keep in sync with writeQuotedBytes below.
func writeQuotedString(w io.Writer, s string) (int, error) {
	buf := getBuffer()
	buf.WriteByte('"')
	start := 0
	for i := 0; i < len(s); {
		if b := s[i]; b < utf8.RuneSelf {
			if 0x20 <= b && b != '\\' && b != '"' {
				i++
				continue
			}
			if start < i {
				buf.WriteString(s[start:i])
			}
			switch b {
			case '\\', '"':
				buf.WriteByte('\\')
				buf.WriteByte(b)
			case '\n':
				buf.WriteByte('\\')
				buf.WriteByte('n')
			case '\r':
				buf.WriteByte('\\')
				buf.WriteByte('r')
			case '\t':
				buf.WriteByte('\\')
				buf.WriteByte('t')
			default:
				// This encodes bytes < 0x20 except for \n, \r, and \t.
				buf.WriteString(`\u00`)
				buf.WriteByte(hex[b>>4])
				buf.WriteByte(hex[b&0xF])
			}
			i++
			start = i
			continue
		}
		c, size := utf8.DecodeRuneInString(s[i:])
		if c == utf8.RuneError {
			if start < i {
				buf.WriteString(s[start:i])
			}
			buf.WriteString(`\ufffd`)
			i += size
			start = i
			continue
		}
		i += size
	}
	if start < len(s) {
		buf.WriteString(s[start:])
	}
	buf.WriteByte('"')
	n, err := w.Write(buf.Bytes())
	poolBuffer(buf)
	return n, err
}

// NOTE: keep in sync with writeQuotedString above.
func writeQuotedBytes(w io.Writer, s []byte) (int, error) {
	buf := getBuffer()
	buf.WriteByte('"')
	start := 0
	for i := 0; i < len(s); {
		if b := s[i]; b < utf8.RuneSelf {
			if 0x20 <= b && b != '\\' && b != '"' {
				i++
				continue
			}
			if start < i {
				buf.Write(s[start:i])
			}
			switch b {
			case '\\', '"':
				buf.WriteByte('\\')
				buf.WriteByte(b)
			case '\n':
				buf.WriteByte('\\')
				buf.WriteByte('n')
			case '\r':
				buf.WriteByte('\\')
				buf.WriteByte('r')
			case '\t':
				buf.WriteByte('\\')
				buf.WriteByte('t')
			default:
				// This encodes bytes < 0x20 except for \n, \r, and \t.
				buf.WriteString(`\u00`)
				buf.WriteByte(hex[b>>4])
				buf.WriteByte(hex[b&0xF])
			}
			i++
			start = i
			continue
		}
		c, size := utf8.DecodeRune(s[i:])
		if c == utf8.RuneError {
			if start < i {
				buf.Write(s[start:i])
			}
			buf.WriteString(`\ufffd`)
			i += size
			start = i
			continue
		}
		i += size
	}
	if start < len(s) {
		buf.Write(s[start:])
	}
	buf.WriteByte('"')
	n, err := w.Write(buf.Bytes())
	poolBuffer(buf)
	return n, err
}

// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
// or it returns -1.
func getu4(s []byte) rune {
	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
		return -1
	}
	r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
	if err != nil {
		return -1
	}
	return rune(r)
}

func unquoteBytes(s []byte) (t []byte, ok bool) {
	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
		return
	}
	s = s[1 : len(s)-1]

	// Check for unusual characters. If there are none,
	// then no unquoting is needed, so return a slice of the
	// original bytes.
	r := 0
	for r < len(s) {
		c := s[r]
		if c == '\\' || c == '"' || c < ' ' {
			break
		}
		if c < utf8.RuneSelf {
			r++
			continue
		}
		rr, size := utf8.DecodeRune(s[r:])
		if rr == utf8.RuneError {
			break
		}
		r += size
	}
	if r == len(s) {
		return s, true
	}

	b := make([]byte, len(s)+2*utf8.UTFMax)
	w := copy(b, s[0:r])
	for r < len(s) {
		// Out of room? Can only happen if s is full of
		// malformed UTF-8 and we're replacing each
		// byte with RuneError.
		if w >= len(b)-2*utf8.UTFMax {
			nb := make([]byte, (len(b)+utf8.UTFMax)*2)
			copy(nb, b[0:w])
			b = nb
		}
		switch c := s[r]; {
		case c == '\\':
			r++
			if r >= len(s) {
				return
			}
			switch s[r] {
			default:
				return
			case '"', '\\', '/', '\'':
				b[w] = s[r]
				r++
				w++
			case 'b':
				b[w] = '\b'
				r++
				w++
			case 'f':
				b[w] = '\f'
				r++
				w++
			case 'n':
				b[w] = '\n'
				r++
				w++
			case 'r':
				b[w] = '\r'
				r++
				w++
			case 't':
				b[w] = '\t'
				r++
				w++
			case 'u':
				r--
				rr := getu4(s[r:])
				if rr < 0 {
					return
				}
				r += 6
				if utf16.IsSurrogate(rr) {
					rr1 := getu4(s[r:])
					if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
						// A valid pair; consume.
						r += 6
						w += utf8.EncodeRune(b[w:], dec)
						break
					}
					// Invalid surrogate; fall back to replacement rune.
					rr = unicode.ReplacementChar
				}
				w += utf8.EncodeRune(b[w:], rr)
			}

		// Quote, control characters are invalid.
		case c == '"', c < ' ':
			return

		// ASCII
		case c < utf8.RuneSelf:
			b[w] = c
			r++
			w++

		// Coerce to well-formed UTF-8.
		default:
			rr, size := utf8.DecodeRune(s[r:])
			r += size
			w += utf8.EncodeRune(b[w:], rr)
		}
	}
	return b[0:w], true
}
20
vendor/github.com/influxdata/influxdb/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2013-2018 InfluxData Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
747
vendor/github.com/influxdata/influxdb/client/v2/client.go
generated
vendored
Normal file
747
vendor/github.com/influxdata/influxdb/client/v2/client.go
generated
vendored
Normal file
|
@ -0,0 +1,747 @@
|
||||||
|
// Package client (v2) is the current official Go client for InfluxDB.
|
||||||
|
package client // import "github.com/influxdata/influxdb/client/v2"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"crypto/tls"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"mime"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"path"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/influxdata/influxdb/models"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HTTPConfig is the config data needed to create an HTTP Client.
|
||||||
|
type HTTPConfig struct {
|
||||||
|
// Addr should be of the form "http://host:port"
|
||||||
|
// or "http://[ipv6-host%zone]:port".
|
||||||
|
Addr string
|
||||||
|
|
||||||
|
// Username is the influxdb username, optional.
|
||||||
|
Username string
|
||||||
|
|
||||||
|
// Password is the influxdb password, optional.
|
||||||
|
Password string
|
||||||
|
|
||||||
|
// UserAgent is the http User Agent, defaults to "InfluxDBClient".
|
||||||
|
UserAgent string
|
||||||
|
|
||||||
|
// Timeout for influxdb writes, defaults to no timeout.
|
||||||
|
Timeout time.Duration
|
||||||
|
|
||||||
|
// InsecureSkipVerify gets passed to the http client, if true, it will
|
||||||
|
// skip https certificate verification. Defaults to false.
|
||||||
|
InsecureSkipVerify bool
|
||||||
|
|
||||||
|
// TLSConfig allows the user to set their own TLS config for the HTTP
|
||||||
|
// Client. If set, this option overrides InsecureSkipVerify.
|
||||||
|
TLSConfig *tls.Config
|
||||||
|
|
||||||
|
// Proxy configures the Proxy function on the HTTP client.
|
||||||
|
Proxy func(req *http.Request) (*url.URL, error)
|
||||||
|
|
||||||
|
// DialContext specifies the dial function for creating unencrypted TCP connections.
|
||||||
|
// If DialContext is nil then the transport dials using package net.
|
||||||
|
DialContext func(ctx context.Context, network, addr string) (net.Conn, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct.
|
||||||
|
type BatchPointsConfig struct {
|
||||||
|
// Precision is the write precision of the points, defaults to "ns".
|
||||||
|
Precision string
|
||||||
|
|
||||||
|
// Database is the database to write points to.
|
||||||
|
Database string
|
||||||
|
|
||||||
|
// RetentionPolicy is the retention policy of the points.
|
||||||
|
RetentionPolicy string
|
||||||
|
|
||||||
|
// Write consistency is the number of servers required to confirm write.
|
||||||
|
WriteConsistency string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Client is a client interface for writing & querying the database.
|
||||||
|
type Client interface {
|
||||||
|
// Ping checks that status of cluster, and will always return 0 time and no
|
||||||
|
// error for UDP clients.
|
||||||
|
Ping(timeout time.Duration) (time.Duration, string, error)
|
||||||
|
|
||||||
|
// Write takes a BatchPoints object and writes all Points to InfluxDB.
|
||||||
|
Write(bp BatchPoints) error
|
||||||
|
|
||||||
|
// Query makes an InfluxDB Query on the database. This will fail if using
|
||||||
|
// the UDP client.
|
||||||
|
Query(q Query) (*Response, error)
|
||||||
|
|
||||||
|
// QueryCtx makes an InfluxDB Query on the database. This will fail if using
|
||||||
|
// the UDP client.
|
||||||
|
QueryCtx(ctx context.Context, q Query) (*Response, error)
|
||||||
|
|
||||||
|
// QueryAsChunk makes an InfluxDB Query on the database. This will fail if using
|
||||||
|
// the UDP client.
|
||||||
|
QueryAsChunk(q Query) (*ChunkedResponse, error)
|
||||||
|
|
||||||
|
// Close releases any resources a Client may be using.
|
||||||
|
Close() error
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewHTTPClient returns a new Client from the provided config.
|
||||||
|
// Client is safe for concurrent use by multiple goroutines.
|
||||||
|
func NewHTTPClient(conf HTTPConfig) (Client, error) {
|
||||||
|
if conf.UserAgent == "" {
|
||||||
|
conf.UserAgent = "InfluxDBClient"
|
||||||
|
}
|
||||||
|
|
||||||
|
u, err := url.Parse(conf.Addr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
} else if u.Scheme != "http" && u.Scheme != "https" {
|
||||||
|
m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+
|
||||||
|
" must start with http:// or https://", u.Scheme)
|
||||||
|
return nil, errors.New(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
tr := &http.Transport{
|
||||||
|
TLSClientConfig: &tls.Config{
|
||||||
|
InsecureSkipVerify: conf.InsecureSkipVerify,
|
||||||
|
},
|
||||||
|
Proxy: conf.Proxy,
|
||||||
|
DialContext: conf.DialContext,
|
||||||
|
}
|
||||||
|
if conf.TLSConfig != nil {
|
||||||
|
tr.TLSClientConfig = conf.TLSConfig
|
||||||
|
// Make sure to preserve the InsecureSkipVerify setting from the config.
|
||||||
|
tr.TLSClientConfig.InsecureSkipVerify = conf.InsecureSkipVerify
|
||||||
|
}
|
||||||
|
return &client{
|
||||||
|
url: *u,
|
||||||
|
username: conf.Username,
|
||||||
|
password: conf.Password,
|
||||||
|
useragent: conf.UserAgent,
|
||||||
|
httpClient: &http.Client{
|
||||||
|
Timeout: conf.Timeout,
|
||||||
|
Transport: tr,
|
||||||
|
},
|
||||||
|
transport: tr,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ping will check to see if the server is up with an optional timeout on waiting for leader.
|
||||||
|
// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
|
||||||
|
func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) {
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
u := c.url
|
||||||
|
u.Path = path.Join(u.Path, "ping")
|
||||||
|
|
||||||
|
req, err := http.NewRequest("GET", u.String(), nil)
|
||||||
|
if err != nil {
|
||||||
|
return 0, "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set("User-Agent", c.useragent)
|
||||||
|
|
||||||
|
if c.username != "" {
|
||||||
|
req.SetBasicAuth(c.username, c.password)
|
||||||
|
}
|
||||||
|
|
||||||
|
if timeout > 0 {
|
||||||
|
params := req.URL.Query()
|
||||||
|
params.Set("wait_for_leader", fmt.Sprintf("%.0fs", timeout.Seconds()))
|
||||||
|
req.URL.RawQuery = params.Encode()
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := c.httpClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return 0, "", err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
body, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return 0, "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusNoContent {
|
||||||
|
var err = errors.New(string(body))
|
||||||
|
return 0, "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
version := resp.Header.Get("X-Influxdb-Version")
|
||||||
|
return time.Since(now), version, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close releases the client's resources.
|
||||||
|
func (c *client) Close() error {
|
||||||
|
c.transport.CloseIdleConnections()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// client is safe for concurrent use as the fields are all read-only
|
||||||
|
// once the client is instantiated.
|
||||||
|
type client struct {
|
||||||
|
// N.B - if url.UserInfo is accessed in future modifications to the
|
||||||
|
// methods on client, you will need to synchronize access to url.
|
||||||
|
url url.URL
|
||||||
|
username string
|
||||||
|
password string
|
||||||
|
useragent string
|
||||||
|
httpClient *http.Client
|
||||||
|
transport *http.Transport
|
||||||
|
}
|
||||||
|
|
||||||
|
// BatchPoints is an interface into a batched grouping of points to write into
|
||||||
|
// InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate
|
||||||
|
// batch for each goroutine.
|
||||||
|
type BatchPoints interface {
|
||||||
|
// AddPoint adds the given point to the Batch of points.
|
||||||
|
AddPoint(p *Point)
|
||||||
|
// AddPoints adds the given points to the Batch of points.
|
||||||
|
AddPoints(ps []*Point)
|
||||||
|
// Points lists the points in the Batch.
|
||||||
|
Points() []*Point
|
||||||
|
|
||||||
|
// Precision returns the currently set precision of this Batch.
|
||||||
|
Precision() string
|
||||||
|
// SetPrecision sets the precision of this batch.
|
||||||
|
SetPrecision(s string) error
|
||||||
|
|
||||||
|
// Database returns the currently set database of this Batch.
|
||||||
|
Database() string
|
||||||
|
// SetDatabase sets the database of this Batch.
|
||||||
|
SetDatabase(s string)
|
||||||
|
|
||||||
|
// WriteConsistency returns the currently set write consistency of this Batch.
|
||||||
|
	WriteConsistency() string
	// SetWriteConsistency sets the write consistency of this Batch.
	SetWriteConsistency(s string)

	// RetentionPolicy returns the currently set retention policy of this Batch.
	RetentionPolicy() string
	// SetRetentionPolicy sets the retention policy of this Batch.
	SetRetentionPolicy(s string)
}

// NewBatchPoints returns a BatchPoints interface based on the given config.
func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) {
	if conf.Precision == "" {
		conf.Precision = "ns"
	}
	if _, err := time.ParseDuration("1" + conf.Precision); err != nil {
		return nil, err
	}
	bp := &batchpoints{
		database:         conf.Database,
		precision:        conf.Precision,
		retentionPolicy:  conf.RetentionPolicy,
		writeConsistency: conf.WriteConsistency,
	}
	return bp, nil
}

type batchpoints struct {
	points           []*Point
	database         string
	precision        string
	retentionPolicy  string
	writeConsistency string
}

func (bp *batchpoints) AddPoint(p *Point) {
	bp.points = append(bp.points, p)
}

func (bp *batchpoints) AddPoints(ps []*Point) {
	bp.points = append(bp.points, ps...)
}

func (bp *batchpoints) Points() []*Point {
	return bp.points
}

func (bp *batchpoints) Precision() string {
	return bp.precision
}

func (bp *batchpoints) Database() string {
	return bp.database
}

func (bp *batchpoints) WriteConsistency() string {
	return bp.writeConsistency
}

func (bp *batchpoints) RetentionPolicy() string {
	return bp.retentionPolicy
}

func (bp *batchpoints) SetPrecision(p string) error {
	if _, err := time.ParseDuration("1" + p); err != nil {
		return err
	}
	bp.precision = p
	return nil
}

func (bp *batchpoints) SetDatabase(db string) {
	bp.database = db
}

func (bp *batchpoints) SetWriteConsistency(wc string) {
	bp.writeConsistency = wc
}

func (bp *batchpoints) SetRetentionPolicy(rp string) {
	bp.retentionPolicy = rp
}

// Point represents a single data point.
type Point struct {
	pt models.Point
}

// NewPoint returns a point with the given timestamp. If a timestamp is not
// given, then data is sent to the database without a timestamp, in which case
// the server will assign local time upon reception. NOTE: it is recommended to
// send data with a timestamp.
func NewPoint(
	name string,
	tags map[string]string,
	fields map[string]interface{},
	t ...time.Time,
) (*Point, error) {
	var T time.Time
	if len(t) > 0 {
		T = t[0]
	}

	pt, err := models.NewPoint(name, models.NewTags(tags), fields, T)
	if err != nil {
		return nil, err
	}
	return &Point{
		pt: pt,
	}, nil
}

// String returns a line-protocol string of the Point.
func (p *Point) String() string {
	return p.pt.String()
}

// PrecisionString returns a line-protocol string of the Point,
// with the timestamp formatted for the given precision.
func (p *Point) PrecisionString(precision string) string {
	return p.pt.PrecisionString(precision)
}

// Name returns the measurement name of the point.
func (p *Point) Name() string {
	return string(p.pt.Name())
}

// Tags returns the tags associated with the point.
func (p *Point) Tags() map[string]string {
	return p.pt.Tags().Map()
}

// Time returns the timestamp for the point.
func (p *Point) Time() time.Time {
	return p.pt.Time()
}

// UnixNano returns the timestamp of the point in nanoseconds since Unix epoch.
func (p *Point) UnixNano() int64 {
	return p.pt.UnixNano()
}

// Fields returns the fields for the point.
func (p *Point) Fields() (map[string]interface{}, error) {
	return p.pt.Fields()
}

// NewPointFrom returns a point from the provided models.Point.
func NewPointFrom(pt models.Point) *Point {
	return &Point{pt: pt}
}

func (c *client) Write(bp BatchPoints) error {
	var b bytes.Buffer

	for _, p := range bp.Points() {
		if p == nil {
			continue
		}
		if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil {
			return err
		}

		if err := b.WriteByte('\n'); err != nil {
			return err
		}
	}

	u := c.url
	u.Path = path.Join(u.Path, "write")

	req, err := http.NewRequest("POST", u.String(), &b)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "")
	req.Header.Set("User-Agent", c.useragent)
	if c.username != "" {
		req.SetBasicAuth(c.username, c.password)
	}

	params := req.URL.Query()
	params.Set("db", bp.Database())
	params.Set("rp", bp.RetentionPolicy())
	params.Set("precision", bp.Precision())
	params.Set("consistency", bp.WriteConsistency())
	req.URL.RawQuery = params.Encode()

	resp, err := c.httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
		var err = errors.New(string(body))
		return err
	}

	return nil
}

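Taken together with the batching types above, a complete write round-trip is sketched below. This is a minimal, hedged example, not part of the diff: it assumes an InfluxDB endpoint at http://localhost:8086, a placeholder database named "mydb", and the NewHTTPClient/HTTPConfig constructor defined earlier in this file.

package main

import (
	"log"
	"time"

	client "github.com/influxdata/influxdb/client/v2"
)

func main() {
	// NewHTTPClient/HTTPConfig are defined earlier in this vendored file.
	c, err := client.NewHTTPClient(client.HTTPConfig{Addr: "http://localhost:8086"})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// All points in a batch share a database, precision, retention policy and consistency.
	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
		Database:  "mydb", // placeholder database name
		Precision: "s",
	})
	if err != nil {
		log.Fatal(err)
	}

	pt, err := client.NewPoint(
		"cpu_usage",
		map[string]string{"host": "server01"},
		map[string]interface{}{"value": 0.64},
		time.Now(),
	)
	if err != nil {
		log.Fatal(err)
	}
	bp.AddPoint(pt)

	// Write serializes each point to line protocol and POSTs the batch to /write.
	if err := c.Write(bp); err != nil {
		log.Fatal(err)
	}
}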
// Query defines a query to send to the server.
type Query struct {
	Command         string
	Database        string
	RetentionPolicy string
	Precision       string
	Chunked         bool
	ChunkSize       int
	Parameters      map[string]interface{}
}

// Params is a type alias to the query parameters.
type Params map[string]interface{}

// NewQuery returns a query object.
// The database and precision arguments can be empty strings if they are not needed for the query.
func NewQuery(command, database, precision string) Query {
	return Query{
		Command:    command,
		Database:   database,
		Precision:  precision,
		Parameters: make(map[string]interface{}),
	}
}

// NewQueryWithRP returns a query object.
// The database, retention policy, and precision arguments can be empty strings if they are not needed
// for the query. Setting the retention policy only works on InfluxDB versions 1.6 or greater.
func NewQueryWithRP(command, database, retentionPolicy, precision string) Query {
	return Query{
		Command:         command,
		Database:        database,
		RetentionPolicy: retentionPolicy,
		Precision:       precision,
		Parameters:      make(map[string]interface{}),
	}
}

// NewQueryWithParameters returns a query object.
// The database and precision arguments can be empty strings if they are not needed for the query.
// parameters is a map of the parameter names used in the command to their values.
func NewQueryWithParameters(command, database, precision string, parameters map[string]interface{}) Query {
	return Query{
		Command:    command,
		Database:   database,
		Precision:  precision,
		Parameters: parameters,
	}
}

// Response represents a list of statement results.
type Response struct {
	Results []Result
	Err     string `json:"error,omitempty"`
}

// Error returns the first error from any statement.
// It returns nil if no errors occurred on any statements.
func (r *Response) Error() error {
	if r.Err != "" {
		return errors.New(r.Err)
	}
	for _, result := range r.Results {
		if result.Err != "" {
			return errors.New(result.Err)
		}
	}
	return nil
}

// Message represents a user message.
type Message struct {
	Level string
	Text  string
}

// Result represents a resultset returned from a single statement.
type Result struct {
	Series   []models.Row
	Messages []*Message
	Err      string `json:"error,omitempty"`
}

// Query sends a command to the server and returns the Response.
func (c *client) Query(q Query) (*Response, error) {
	return c.QueryCtx(nil, q)
}

// QueryCtx sends a command to the server and returns the Response.
func (c *client) QueryCtx(ctx context.Context, q Query) (*Response, error) {
	req, err := c.createDefaultRequest(ctx, q)
	if err != nil {
		return nil, err
	}
	params := req.URL.Query()
	if q.Chunked {
		params.Set("chunked", "true")
		if q.ChunkSize > 0 {
			params.Set("chunk_size", strconv.Itoa(q.ChunkSize))
		}
		req.URL.RawQuery = params.Encode()
	}
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if err := checkResponse(resp); err != nil {
		return nil, err
	}

	var response Response
	if q.Chunked {
		cr := NewChunkedResponse(resp.Body)
		for {
			r, err := cr.NextResponse()
			if err != nil {
				if err == io.EOF {
					break
				}
				// If we got an error while decoding the response, send that back.
				return nil, err
			}

			if r == nil {
				break
			}

			response.Results = append(response.Results, r.Results...)
			if r.Err != "" {
				response.Err = r.Err
				break
			}
		}
	} else {
		dec := json.NewDecoder(resp.Body)
		dec.UseNumber()
		decErr := dec.Decode(&response)

		// ignore this error if we got an invalid status code
		if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK {
			decErr = nil
		}
		// If we got a valid decode error, send that back
		if decErr != nil {
			return nil, fmt.Errorf("unable to decode json: received status code %d err: %s", resp.StatusCode, decErr)
		}
	}

	// If we don't have an error in our json response, and didn't get statusOK
	// then send back an error
	if resp.StatusCode != http.StatusOK && response.Error() == nil {
		return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
	}
	return &response, nil
}

// QueryAsChunk sends a command to the server and returns a ChunkedResponse.
func (c *client) QueryAsChunk(q Query) (*ChunkedResponse, error) {
	req, err := c.createDefaultRequest(nil, q)
	if err != nil {
		return nil, err
	}
	params := req.URL.Query()
	params.Set("chunked", "true")
	if q.ChunkSize > 0 {
		params.Set("chunk_size", strconv.Itoa(q.ChunkSize))
	}
	req.URL.RawQuery = params.Encode()
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, err
	}

	if err := checkResponse(resp); err != nil {
		return nil, err
	}
	return NewChunkedResponse(resp.Body), nil
}

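A hedged sketch of the query path, reusing the client c (and imports) from the write example above; Response.Error folds the response-level and per-statement errors into a single check.

	q := client.NewQuery("SELECT count(value) FROM cpu_usage", "mydb", "s")
	resp, err := c.Query(q)
	if err != nil {
		log.Fatal(err)
	}
	if resp.Error() != nil {
		log.Fatal(resp.Error())
	}
	for _, result := range resp.Results {
		for _, row := range result.Series {
			fmt.Println(row.Name, row.Values) // row is a models.Row
		}
	}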
func checkResponse(resp *http.Response) error {
	// If we lack a X-Influxdb-Version header, then we didn't get a response from influxdb
	// but instead some other service. If the error code is also a 500+ code, then some
	// downstream loadbalancer/proxy/etc had an issue and we should report that.
	if resp.Header.Get("X-Influxdb-Version") == "" && resp.StatusCode >= http.StatusInternalServerError {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil || len(body) == 0 {
			return fmt.Errorf("received status code %d from downstream server", resp.StatusCode)
		}

		return fmt.Errorf("received status code %d from downstream server, with response body: %q", resp.StatusCode, body)
	}

	// If we get an unexpected content type, then it is also not from influx direct and therefore
	// we want to know what we received and what status code was returned for debugging purposes.
	if cType, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type")); cType != "application/json" {
		// Read up to 1kb of the body to help identify downstream errors and limit the impact of things
		// like downstream serving a large file
		body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024))
		if err != nil || len(body) == 0 {
			return fmt.Errorf("expected json response, got empty body, with status: %v", resp.StatusCode)
		}

		return fmt.Errorf("expected json response, got %q, with status: %v and response body: %q", cType, resp.StatusCode, body)
	}
	return nil
}

func (c *client) createDefaultRequest(ctx context.Context, q Query) (*http.Request, error) {
	u := c.url
	u.Path = path.Join(u.Path, "query")

	jsonParameters, err := json.Marshal(q.Parameters)
	if err != nil {
		return nil, err
	}

	req, err := http.NewRequest("POST", u.String(), nil)
	if err != nil {
		return nil, err
	}

	if ctx != nil {
		req = req.WithContext(ctx)
	}

	req.Header.Set("Content-Type", "")
	req.Header.Set("User-Agent", c.useragent)

	if c.username != "" {
		req.SetBasicAuth(c.username, c.password)
	}

	params := req.URL.Query()
	params.Set("q", q.Command)
	params.Set("db", q.Database)
	if q.RetentionPolicy != "" {
		params.Set("rp", q.RetentionPolicy)
	}
	params.Set("params", string(jsonParameters))

	if q.Precision != "" {
		params.Set("epoch", q.Precision)
	}
	req.URL.RawQuery = params.Encode()

	return req, nil
}

// duplexReader reads responses and writes it to another writer while
// satisfying the reader interface.
type duplexReader struct {
	r io.ReadCloser
	w io.Writer
}

func (r *duplexReader) Read(p []byte) (n int, err error) {
	n, err = r.r.Read(p)
	if err == nil {
		r.w.Write(p[:n])
	}
	return n, err
}

// Close closes the response.
func (r *duplexReader) Close() error {
	return r.r.Close()
}

// ChunkedResponse represents a response from the server that
// uses chunking to stream the output.
type ChunkedResponse struct {
	dec    *json.Decoder
	duplex *duplexReader
	buf    bytes.Buffer
}

// NewChunkedResponse reads a stream and produces responses from the stream.
func NewChunkedResponse(r io.Reader) *ChunkedResponse {
	rc, ok := r.(io.ReadCloser)
	if !ok {
		rc = ioutil.NopCloser(r)
	}
	resp := &ChunkedResponse{}
	resp.duplex = &duplexReader{r: rc, w: &resp.buf}
	resp.dec = json.NewDecoder(resp.duplex)
	resp.dec.UseNumber()
	return resp
}

// NextResponse reads the next line of the stream and returns a response.
func (r *ChunkedResponse) NextResponse() (*Response, error) {
	var response Response
	if err := r.dec.Decode(&response); err != nil {
		if err == io.EOF {
			return nil, err
		}
		// A decoding error happened. This probably means the server crashed
		// and sent a last-ditch error message to us. Ensure we have read the
		// entirety of the connection to get any remaining error text.
		io.Copy(ioutil.Discard, r.duplex)
		return nil, errors.New(strings.TrimSpace(r.buf.String()))
	}

	r.buf.Reset()
	return &response, nil
}

// Close closes the response.
func (r *ChunkedResponse) Close() error {
	return r.duplex.Close()
}
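For large result sets, the chunked path avoids buffering the whole response body in memory. A hedged sketch, again assuming the client c and imports from the write example, plus io:

	q := client.NewQuery("SELECT * FROM cpu_usage", "mydb", "ns")
	q.ChunkSize = 10000 // server flushes a partial response every 10000 rows
	cr, err := c.QueryAsChunk(q)
	if err != nil {
		log.Fatal(err)
	}
	defer cr.Close()
	for {
		r, err := cr.NextResponse()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		// Process r.Results incrementally instead of holding everything in memory.
	}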
73 vendor/github.com/influxdata/influxdb/client/v2/params.go generated vendored Normal file
@@ -0,0 +1,73 @@
package client

import (
	"encoding/json"
	"time"
)

type (
	// Identifier is an identifier value.
	Identifier string

	// StringValue is a string literal.
	StringValue string

	// RegexValue is a regexp literal.
	RegexValue string

	// NumberValue is a number literal.
	NumberValue float64

	// IntegerValue is an integer literal.
	IntegerValue int64

	// BooleanValue is a boolean literal.
	BooleanValue bool

	// TimeValue is a time literal.
	TimeValue time.Time

	// DurationValue is a duration literal.
	DurationValue time.Duration
)

func (v Identifier) MarshalJSON() ([]byte, error) {
	m := map[string]string{"identifier": string(v)}
	return json.Marshal(m)
}

func (v StringValue) MarshalJSON() ([]byte, error) {
	m := map[string]string{"string": string(v)}
	return json.Marshal(m)
}

func (v RegexValue) MarshalJSON() ([]byte, error) {
	m := map[string]string{"regex": string(v)}
	return json.Marshal(m)
}

func (v NumberValue) MarshalJSON() ([]byte, error) {
	m := map[string]float64{"number": float64(v)}
	return json.Marshal(m)
}

func (v IntegerValue) MarshalJSON() ([]byte, error) {
	m := map[string]int64{"integer": int64(v)}
	return json.Marshal(m)
}

func (v BooleanValue) MarshalJSON() ([]byte, error) {
	m := map[string]bool{"boolean": bool(v)}
	return json.Marshal(m)
}

func (v TimeValue) MarshalJSON() ([]byte, error) {
	t := time.Time(v)
	m := map[string]string{"string": t.Format(time.RFC3339Nano)}
	return json.Marshal(m)
}

func (v DurationValue) MarshalJSON() ([]byte, error) {
	m := map[string]int64{"duration": int64(v)}
	return json.Marshal(m)
}
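A hedged sketch of binding parameters with these typed values, reusing the client c from the earlier examples; the $name placeholders are resolved server-side, and the Parameters map is JSON-encoded into the "params" query value by createDefaultRequest above.

	q := client.NewQueryWithParameters(
		"SELECT * FROM cpu_usage WHERE host = $host AND value > $min",
		"mydb", "ns",
		client.Params{
			"host": client.StringValue("server01"),
			"min":  client.NumberValue(0.5),
		},
	)
	resp, err := c.Query(q)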
121 vendor/github.com/influxdata/influxdb/client/v2/udp.go generated vendored Normal file
@@ -0,0 +1,121 @@
package client

import (
	"context"
	"fmt"
	"io"
	"net"
	"time"
)

const (
	// UDPPayloadSize is a reasonable default payload size for UDP packets that
	// could be travelling over the internet.
	UDPPayloadSize = 512
)

// UDPConfig is the config data needed to create a UDP Client.
type UDPConfig struct {
	// Addr should be of the form "host:port"
	// or "[ipv6-host%zone]:port".
	Addr string

	// PayloadSize is the maximum size of a UDP client message, optional.
	// Tune this based on your network. Defaults to UDPPayloadSize.
	PayloadSize int
}

// NewUDPClient returns a client interface for writing to an InfluxDB UDP
// service from the given config.
func NewUDPClient(conf UDPConfig) (Client, error) {
	var udpAddr *net.UDPAddr
	udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr)
	if err != nil {
		return nil, err
	}

	conn, err := net.DialUDP("udp", nil, udpAddr)
	if err != nil {
		return nil, err
	}

	payloadSize := conf.PayloadSize
	if payloadSize == 0 {
		payloadSize = UDPPayloadSize
	}

	return &udpclient{
		conn:        conn,
		payloadSize: payloadSize,
	}, nil
}

// Close releases the udpclient's resources.
func (uc *udpclient) Close() error {
	return uc.conn.Close()
}

type udpclient struct {
	conn        io.WriteCloser
	payloadSize int
}

func (uc *udpclient) Write(bp BatchPoints) error {
	var b = make([]byte, 0, uc.payloadSize) // initial buffer size, it will grow as needed
	var d, _ = time.ParseDuration("1" + bp.Precision())

	var delayedError error

	var checkBuffer = func(n int) {
		if len(b) > 0 && len(b)+n > uc.payloadSize {
			if _, err := uc.conn.Write(b); err != nil {
				delayedError = err
			}
			b = b[:0]
		}
	}

	for _, p := range bp.Points() {
		p.pt.Round(d)
		pointSize := p.pt.StringSize() + 1 // include newline in size
		//point := p.pt.RoundedString(d) + "\n"

		checkBuffer(pointSize)

		if p.Time().IsZero() || pointSize <= uc.payloadSize {
			b = p.pt.AppendString(b)
			b = append(b, '\n')
			continue
		}

		points := p.pt.Split(uc.payloadSize - 1) // account for newline character
		for _, sp := range points {
			checkBuffer(sp.StringSize() + 1)
			b = sp.AppendString(b)
			b = append(b, '\n')
		}
	}

	if len(b) > 0 {
		if _, err := uc.conn.Write(b); err != nil {
			return err
		}
	}
	return delayedError
}

func (uc *udpclient) Query(q Query) (*Response, error) {
	return nil, fmt.Errorf("querying via UDP is not supported")
}

func (uc *udpclient) QueryCtx(ctx context.Context, q Query) (*Response, error) {
	return nil, fmt.Errorf("querying via UDP is not supported")
}

func (uc *udpclient) QueryAsChunk(q Query) (*ChunkedResponse, error) {
	return nil, fmt.Errorf("querying via UDP is not supported")
}

func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) {
	return 0, "", nil
}
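A hedged sketch of fire-and-forget writes over UDP; it assumes an InfluxDB UDP listener on localhost:8089 and the imports from the earlier examples. Queries over UDP return an error by design.

	uc, err := client.NewUDPClient(client.UDPConfig{Addr: "localhost:8089"})
	if err != nil {
		log.Fatal(err)
	}
	defer uc.Close()

	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{Precision: "s"})
	pt, _ := client.NewPoint("cpu_usage", nil, map[string]interface{}{"value": 0.42}, time.Now())
	bp.AddPoint(pt)
	_ = uc.Write(bp) // oversized points are split so each datagram fits PayloadSize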
48 vendor/github.com/influxdata/influxdb/models/consistency.go generated vendored Normal file
@@ -0,0 +1,48 @@
package models

import (
	"errors"
	"strings"
)

// ConsistencyLevel represents a required replication criteria before a write can
// be returned as successful.
//
// The consistency level is handled in open-source InfluxDB but only applicable to clusters.
type ConsistencyLevel int

const (
	// ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet.
	ConsistencyLevelAny ConsistencyLevel = iota

	// ConsistencyLevelOne requires at least one data node acknowledged a write.
	ConsistencyLevelOne

	// ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write.
	ConsistencyLevelQuorum

	// ConsistencyLevelAll requires all data nodes to acknowledge a write.
	ConsistencyLevelAll
)

var (
	// ErrInvalidConsistencyLevel is returned when parsing the string version
	// of a consistency level.
	ErrInvalidConsistencyLevel = errors.New("invalid consistency level")
)

// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const.
func ParseConsistencyLevel(level string) (ConsistencyLevel, error) {
	switch strings.ToLower(level) {
	case "any":
		return ConsistencyLevelAny, nil
	case "one":
		return ConsistencyLevelOne, nil
	case "quorum":
		return ConsistencyLevelQuorum, nil
	case "all":
		return ConsistencyLevelAll, nil
	default:
		return 0, ErrInvalidConsistencyLevel
	}
}
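A short hedged example of validating a user-supplied consistency string with this parser (matching is case-insensitive):

	level, err := models.ParseConsistencyLevel("quorum")
	if err != nil {
		log.Fatal(err) // ErrInvalidConsistencyLevel for unrecognized strings
	}
	fmt.Println(level == models.ConsistencyLevelQuorum) // true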
16 vendor/github.com/influxdata/influxdb/models/fieldtype_string.go generated vendored Normal file
@@ -0,0 +1,16 @@
// Code generated by "stringer -type=FieldType"; DO NOT EDIT.

package models

import "strconv"

const _FieldType_name = "IntegerFloatBooleanStringEmptyUnsigned"

var _FieldType_index = [...]uint8{0, 7, 12, 19, 25, 30, 38}

func (i FieldType) String() string {
	if i < 0 || i >= FieldType(len(_FieldType_index)-1) {
		return "FieldType(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _FieldType_name[_FieldType_index[i]:_FieldType_index[i+1]]
}
3 vendor/github.com/influxdata/influxdb/models/gen.go generated vendored Normal file
@@ -0,0 +1,3 @@
package models

//go:generate stringer -type=FieldType
32 vendor/github.com/influxdata/influxdb/models/inline_fnv.go generated vendored Normal file
@@ -0,0 +1,32 @@
package models // import "github.com/influxdata/influxdb/models"

// from stdlib hash/fnv/fnv.go
const (
	prime64  = 1099511628211
	offset64 = 14695981039346656037
)

// InlineFNV64a is an alloc-free port of the standard library's fnv64a.
// See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function.
type InlineFNV64a uint64

// NewInlineFNV64a returns a new instance of InlineFNV64a.
func NewInlineFNV64a() InlineFNV64a {
	return offset64
}

// Write adds data to the running hash.
func (s *InlineFNV64a) Write(data []byte) (int, error) {
	hash := uint64(*s)
	for _, c := range data {
		hash ^= uint64(c)
		hash *= prime64
	}
	*s = InlineFNV64a(hash)
	return len(data), nil
}

// Sum64 returns the uint64 of the current resulting hash.
func (s *InlineFNV64a) Sum64() uint64 {
	return uint64(*s)
}
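A short hedged example of the running hash; for the same input the digest matches the standard library's hash/fnv New64a, just without the allocation.

	h := models.NewInlineFNV64a()
	h.Write([]byte("cpu,host=server01")) // Write never returns a non-nil error
	fmt.Printf("%x\n", h.Sum64())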
44 vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go generated vendored Normal file
@@ -0,0 +1,44 @@
package models // import "github.com/influxdata/influxdb/models"

import (
	"reflect"
	"strconv"
	"unsafe"
)

// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
	s := unsafeBytesToString(b)
	return strconv.ParseInt(s, base, bitSize)
}

// parseUintBytes is a zero-alloc wrapper around strconv.ParseUint.
func parseUintBytes(b []byte, base int, bitSize int) (i uint64, err error) {
	s := unsafeBytesToString(b)
	return strconv.ParseUint(s, base, bitSize)
}

// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
func parseFloatBytes(b []byte, bitSize int) (float64, error) {
	s := unsafeBytesToString(b)
	return strconv.ParseFloat(s, bitSize)
}

// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
func parseBoolBytes(b []byte) (bool, error) {
	return strconv.ParseBool(unsafeBytesToString(b))
}

// unsafeBytesToString converts a []byte to a string without a heap allocation.
//
// It is unsafe, and is intended to prepare input to short-lived functions
// that require strings.
func unsafeBytesToString(in []byte) string {
	src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
	dst := reflect.StringHeader{
		Data: src.Data,
		Len:  src.Len,
	}
	s := *(*string)(unsafe.Pointer(&dst))
	return s
}
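These helpers are unexported, so callers outside the package never see them; the standalone sketch below mirrors the same zero-copy conversion, with the same caveat that the byte slice must not be mutated while the string view is live.

	b := []byte("3.14")
	s := *(*string)(unsafe.Pointer(&b)) // same trick as unsafeBytesToString above
	f, _ := strconv.ParseFloat(s, 64)
	fmt.Println(f) // 3.14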
2476 vendor/github.com/influxdata/influxdb/models/points.go generated vendored Normal file
File diff suppressed because it is too large
62 vendor/github.com/influxdata/influxdb/models/rows.go generated vendored Normal file
@@ -0,0 +1,62 @@
package models

import (
	"sort"
)

// Row represents a single row returned from the execution of a statement.
type Row struct {
	Name    string            `json:"name,omitempty"`
	Tags    map[string]string `json:"tags,omitempty"`
	Columns []string          `json:"columns,omitempty"`
	Values  [][]interface{}   `json:"values,omitempty"`
	Partial bool              `json:"partial,omitempty"`
}

// SameSeries returns true if r contains values for the same series as o.
func (r *Row) SameSeries(o *Row) bool {
	return r.tagsHash() == o.tagsHash() && r.Name == o.Name
}

// tagsHash returns a hash of tag key/value pairs.
func (r *Row) tagsHash() uint64 {
	h := NewInlineFNV64a()
	keys := r.tagsKeys()
	for _, k := range keys {
		h.Write([]byte(k))
		h.Write([]byte(r.Tags[k]))
	}
	return h.Sum64()
}

// tagsKeys returns a sorted list of tag keys.
func (r *Row) tagsKeys() []string {
	a := make([]string, 0, len(r.Tags))
	for k := range r.Tags {
		a = append(a, k)
	}
	sort.Strings(a)
	return a
}

// Rows represents a collection of rows. Rows implements sort.Interface.
type Rows []*Row

// Len implements sort.Interface.
func (p Rows) Len() int { return len(p) }

// Less implements sort.Interface.
func (p Rows) Less(i, j int) bool {
	// Sort by name first.
	if p[i].Name != p[j].Name {
		return p[i].Name < p[j].Name
	}

	// Sort by tag set hash. Tags don't have a meaningful sort order so we
	// just compute a hash and sort by that instead. This allows the tests
	// to receive rows in a predictable order every time.
	return p[i].tagsHash() < p[j].tagsHash()
}

// Swap implements sort.Interface.
func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
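A short hedged example of the ordering contract: rows sort by measurement name first, with ties broken by the tag-set hash.

	rows := models.Rows{
		{Name: "mem", Tags: map[string]string{"host": "b"}},
		{Name: "cpu", Tags: map[string]string{"host": "a"}},
	}
	sort.Sort(rows)
	fmt.Println(rows[0].Name) // cpu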
42 vendor/github.com/influxdata/influxdb/models/statistic.go generated vendored Normal file
@@ -0,0 +1,42 @@
package models

// Statistic is the representation of a statistic used by the monitoring service.
type Statistic struct {
	Name   string                 `json:"name"`
	Tags   map[string]string      `json:"tags"`
	Values map[string]interface{} `json:"values"`
}

// NewStatistic returns an initialized Statistic.
func NewStatistic(name string) Statistic {
	return Statistic{
		Name:   name,
		Tags:   make(map[string]string),
		Values: make(map[string]interface{}),
	}
}

// StatisticTags is a map that can be merged with others without causing
// mutations to either map.
type StatisticTags map[string]string

// Merge creates a new map containing the merged contents of tags and t.
// If both tags and the receiver map contain the same key, the value in tags
// is used in the resulting map.
//
// Merge always returns a usable map.
func (t StatisticTags) Merge(tags map[string]string) map[string]string {
	// Add everything in tags to the result.
	out := make(map[string]string, len(tags))
	for k, v := range tags {
		out[k] = v
	}

	// Only add values from t that don't appear in tags.
	for k, v := range t {
		if _, ok := tags[k]; !ok {
			out[k] = v
		}
	}
	return out
}
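A short hedged example of the merge precedence: values passed to Merge win over the receiver's defaults, and neither input map is mutated.

	defaults := models.StatisticTags{"database": "mydb", "engine": "tsm1"}
	merged := defaults.Merge(map[string]string{"database": "other"})
	fmt.Println(merged["database"], merged["engine"]) // other tsm1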
156 vendor/github.com/influxdata/influxdb/models/tagkeysset.go generated vendored Normal file
@@ -0,0 +1,156 @@
package models

import (
	"bytes"
	"strings"
)

// TagKeysSet provides set operations for combining Tags.
type TagKeysSet struct {
	i    int
	keys [2][][]byte
	tmp  [][]byte
}

// Clear removes all the elements of TagKeysSet and ensures all internal
// buffers are reset.
func (set *TagKeysSet) Clear() {
	set.clear(set.keys[0])
	set.clear(set.keys[1])
	set.clear(set.tmp)
	set.i = 0
	set.keys[0] = set.keys[0][:0]
}

func (set *TagKeysSet) clear(b [][]byte) {
	b = b[:cap(b)]
	for i := range b {
		b[i] = nil
	}
}

// KeysBytes returns the merged keys in lexicographical order.
// The slice is valid until the next call to UnionKeys, UnionBytes or Reset.
func (set *TagKeysSet) KeysBytes() [][]byte {
	return set.keys[set.i&1]
}

// Keys returns a copy of the merged keys in lexicographical order.
func (set *TagKeysSet) Keys() []string {
	keys := set.KeysBytes()
	s := make([]string, 0, len(keys))
	for i := range keys {
		s = append(s, string(keys[i]))
	}
	return s
}

func (set *TagKeysSet) String() string {
	var s []string
	for _, k := range set.KeysBytes() {
		s = append(s, string(k))
	}
	return strings.Join(s, ",")
}

// IsSupersetKeys returns true if the TagKeysSet is a superset of all the keys
// contained in other.
func (set *TagKeysSet) IsSupersetKeys(other Tags) bool {
	keys := set.keys[set.i&1]
	i, j := 0, 0
	for i < len(keys) && j < len(other) {
		if cmp := bytes.Compare(keys[i], other[j].Key); cmp > 0 {
			return false
		} else if cmp == 0 {
			j++
		}
		i++
	}

	return j == len(other)
}

// IsSupersetBytes returns true if the TagKeysSet is a superset of all the keys
// in other.
// Other must be lexicographically sorted or the results are undefined.
func (set *TagKeysSet) IsSupersetBytes(other [][]byte) bool {
	keys := set.keys[set.i&1]
	i, j := 0, 0
	for i < len(keys) && j < len(other) {
		if cmp := bytes.Compare(keys[i], other[j]); cmp > 0 {
			return false
		} else if cmp == 0 {
			j++
		}
		i++
	}

	return j == len(other)
}

// UnionKeys updates the set so that it is the union of itself and all the
// keys contained in other.
func (set *TagKeysSet) UnionKeys(other Tags) {
	if set.IsSupersetKeys(other) {
		return
	}

	if l := len(other); cap(set.tmp) < l {
		set.tmp = make([][]byte, l)
	} else {
		set.tmp = set.tmp[:l]
	}

	for i := range other {
		set.tmp[i] = other[i].Key
	}

	set.merge(set.tmp)
}

// UnionBytes updates the set so that it is the union of itself and all the
// keys contained in other.
// Other must be lexicographically sorted or the results are undefined.
func (set *TagKeysSet) UnionBytes(other [][]byte) {
	if set.IsSupersetBytes(other) {
		return
	}

	set.merge(other)
}

func (set *TagKeysSet) merge(in [][]byte) {
	keys := set.keys[set.i&1]
	l := len(keys) + len(in)
	set.i = (set.i + 1) & 1
	keya := set.keys[set.i&1]
	if cap(keya) < l {
		keya = make([][]byte, 0, l)
	} else {
		keya = keya[:0]
	}

	i, j := 0, 0
	for i < len(keys) && j < len(in) {
		ki, kj := keys[i], in[j]
		if cmp := bytes.Compare(ki, kj); cmp < 0 {
			i++
		} else if cmp > 0 {
			ki = kj
			j++
		} else {
			i++
			j++
		}

		keya = append(keya, ki)
	}

	if i < len(keys) {
		keya = append(keya, keys[i:]...)
	} else if j < len(in) {
		keya = append(keya, in[j:]...)
	}

	set.keys[set.i&1] = keya
}
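A short hedged example of accumulating the union of tag keys across series; note that UnionBytes requires its input to be lexicographically pre-sorted.

	var set models.TagKeysSet
	set.UnionBytes([][]byte{[]byte("host"), []byte("region")})
	set.UnionBytes([][]byte{[]byte("dc"), []byte("host")})
	fmt.Println(set.String()) // dc,host,region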
74 vendor/github.com/influxdata/influxdb/models/time.go generated vendored Normal file
@@ -0,0 +1,74 @@
package models

// Helper time methods since parsing time can easily overflow and we only support a
// specific time range.

import (
	"fmt"
	"math"
	"time"
)

const (
	// MinNanoTime is the minimum time that can be represented.
	//
	// 1677-09-21 00:12:43.145224194 +0000 UTC
	//
	// The two lowest minimum integers are used as sentinel values. The
	// minimum value needs to be used as a value lower than any other value for
	// comparisons and another separate value is needed to act as a sentinel
	// default value that is unusable by the user, but usable internally.
	// Because these two values need to be used for a special purpose, we do
	// not allow users to write points at these two times.
	MinNanoTime = int64(math.MinInt64) + 2

	// MaxNanoTime is the maximum time that can be represented.
	//
	// 2262-04-11 23:47:16.854775806 +0000 UTC
	//
	// The highest time represented by a nanosecond needs to be used for an
	// exclusive range in the shard group, so the maximum time needs to be one
	// less than the possible maximum number of nanoseconds representable by an
	// int64 so that we don't lose a point at that one time.
	MaxNanoTime = int64(math.MaxInt64) - 1
)

var (
	minNanoTime = time.Unix(0, MinNanoTime).UTC()
	maxNanoTime = time.Unix(0, MaxNanoTime).UTC()

	// ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch.
	ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime)
)

// SafeCalcTime safely calculates the time given. Will return error if the time is outside the
// supported range.
func SafeCalcTime(timestamp int64, precision string) (time.Time, error) {
	mult := GetPrecisionMultiplier(precision)
	if t, ok := safeSignedMult(timestamp, mult); ok {
		tme := time.Unix(0, t).UTC()
		return tme, CheckTime(tme)
	}

	return time.Time{}, ErrTimeOutOfRange
}

// CheckTime checks that a time is within the safe range.
func CheckTime(t time.Time) error {
	if t.Before(minNanoTime) || t.After(maxNanoTime) {
		return ErrTimeOutOfRange
	}
	return nil
}

// Perform the multiplication and check to make sure it didn't overflow.
func safeSignedMult(a, b int64) (int64, bool) {
	if a == 0 || b == 0 || a == 1 || b == 1 {
		return a * b, true
	}
	if a == MinNanoTime || b == MaxNanoTime {
		return 0, false
	}
	c := a * b
	return c, c/b == a
}
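A short hedged example of safe timestamp conversion; GetPrecisionMultiplier lives in points.go, whose diff is suppressed above.

	t, err := models.SafeCalcTime(1600000000, "s")
	if err != nil {
		log.Fatal(err) // ErrTimeOutOfRange on overflow or out-of-range times
	}
	fmt.Println(t) // 2020-09-13 12:26:40 +0000 UTC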
Some files were not shown because too many files have changed in this diff