diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index d0291479d..2fcfe52aa 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -45,16 +45,19 @@ jobs: GOOS=freebsd go build -mod=vendor ./app/vmalert GOOS=freebsd go build -mod=vendor ./app/vmbackup GOOS=freebsd go build -mod=vendor ./app/vmrestore + GOOS=freebsd go build -mod=vendor ./app/vmctl GOOS=openbsd go build -mod=vendor ./app/victoria-metrics GOOS=openbsd go build -mod=vendor ./app/vmagent GOOS=openbsd go build -mod=vendor ./app/vmalert GOOS=openbsd go build -mod=vendor ./app/vmbackup GOOS=openbsd go build -mod=vendor ./app/vmrestore + GOOS=openbsd go build -mod=vendor ./app/vmctl GOOS=darwin go build -mod=vendor ./app/victoria-metrics GOOS=darwin go build -mod=vendor ./app/vmagent GOOS=darwin go build -mod=vendor ./app/vmalert GOOS=darwin go build -mod=vendor ./app/vmbackup GOOS=darwin go build -mod=vendor ./app/vmrestore + GOOS=darwin go build -mod=vendor ./app/vmctl - name: Publish coverage uses: codecov/codecov-action@v1.0.6 with: diff --git a/Makefile b/Makefile index 9d7ffaad2..e69e16662 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,8 @@ all: \ vmalert-prod \ vmauth-prod \ vmbackup-prod \ - vmrestore-prod + vmrestore-prod \ + vmctl-prod include app/*/Makefile include deployment/*/Makefile @@ -32,7 +33,8 @@ publish: \ publish-vmalert \ publish-vmauth \ publish-vmbackup \ - publish-vmrestore + publish-vmrestore \ + publish-vmctl package: \ package-victoria-metrics \ @@ -40,21 +42,24 @@ package: \ package-vmalert \ package-vmauth \ package-vmbackup \ - package-vmrestore + package-vmrestore \ + package-vmctl vmutils: \ vmagent \ vmalert \ vmauth \ vmbackup \ - vmrestore + vmrestore \ + vmctl vmutils-arm64: \ vmagent-arm64 \ vmalert-arm64 \ vmauth-arm64 \ vmbackup-arm64 \ - vmrestore-arm64 + vmrestore-arm64 \ + vmctl-arm64 release-snap: snapcraft @@ -97,7 +102,8 @@ release-vmutils-generic: \ vmalert-$(GOARCH)-prod \ vmauth-$(GOARCH)-prod \ vmbackup-$(GOARCH)-prod \ - vmrestore-$(GOARCH)-prod + vmrestore-$(GOARCH)-prod \ + vmctl-$(GOARCH)-prod cd bin && \ tar --transform="flags=r;s|-$(GOARCH)||" -czf vmutils-$(GOARCH)-$(PKG_TAG).tar.gz \ vmagent-$(GOARCH)-prod \ @@ -105,12 +111,14 @@ release-vmutils-generic: \ vmauth-$(GOARCH)-prod \ vmbackup-$(GOARCH)-prod \ vmrestore-$(GOARCH)-prod \ + vmctl-$(GOARCH)-prod \ && sha256sum vmutils-$(GOARCH)-$(PKG_TAG).tar.gz \ vmagent-$(GOARCH)-prod \ vmalert-$(GOARCH)-prod \ vmauth-$(GOARCH)-prod \ vmbackup-$(GOARCH)-prod \ vmrestore-$(GOARCH)-prod \ + vmctl-$(GOARCH)-prod \ | sed s/-$(GOARCH)// > vmutils-$(GOARCH)-$(PKG_TAG)_checksums.txt pprof-cpu: @@ -141,6 +149,7 @@ errcheck: install-errcheck errcheck -exclude=errcheck_excludes.txt ./app/vmauth/... errcheck -exclude=errcheck_excludes.txt ./app/vmbackup/... errcheck -exclude=errcheck_excludes.txt ./app/vmrestore/... + errcheck -exclude=errcheck_excludes.txt ./app/vmctl/... 
install-errcheck: which errcheck || go install github.com/kisielk/errcheck @@ -204,4 +213,5 @@ docs-sync: cp app/vmauth/README.md docs/vmauth.md cp app/vmbackup/README.md docs/vmbackup.md cp app/vmrestore/README.md docs/vmrestore.md + cp app/vmctl/README.md docs/vmctl.md cp README.md docs/Single-server-VictoriaMetrics.md diff --git a/README.md b/README.md index e60d13534..3dc192b00 100644 --- a/README.md +++ b/README.md @@ -154,6 +154,7 @@ Alphabetically sorted links to case studies: * [Tuning](#tuning) * [Monitoring](#monitoring) * [Troubleshooting](#troubleshooting) +* [Data migration](#data-migration) * [Backfilling](#backfilling) * [Data updates](#data-updates) * [Replication](#replication) @@ -1353,6 +1354,17 @@ See the example of alerting rules for VM components [here](https://github.com/Vi * VictoriaMetrics ignores `NaN` values during data ingestion. +## Data migration + +Use [vmctl](https://victoriametrics.github.io/vmctl.html) for data migration. It supports the following data migration types: + +* From Prometheus to VictoriaMetrics +* From InfluxDB to VictoriaMetrics +* From VictoriaMetrics to VictoriaMetrics + +See [vmctl docs](https://victoriametrics.github.io/vmctl.html) for more details. + + ## Backfilling VictoriaMetrics accepts historical data in arbitrary order of time via [any supported ingestion method](#how-to-import-time-series-data). @@ -1420,7 +1432,6 @@ The collected profiles may be analyzed with [go tool pprof](https://github.com/g * [Helm charts for single-node and cluster versions of VictoriaMetrics](https://github.com/VictoriaMetrics/helm-charts). * [Kubernetes operator for VictoriaMetrics](https://github.com/VictoriaMetrics/operator). -* [vmctl tool for data migration to VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl). * [netdata](https://github.com/netdata/netdata) can push data into VictoriaMetrics via `Prometheus remote_write API`. See [these docs](https://github.com/netdata/netdata#integrations). * [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi) can use VictoriaMetrics as time series backend. diff --git a/app/vmctl/Makefile b/app/vmctl/Makefile new file mode 100644 index 000000000..b039877ba --- /dev/null +++ b/app/vmctl/Makefile @@ -0,0 +1,73 @@ +# All these commands must run from repository root. 
+ +vmctl: + APP_NAME=vmctl $(MAKE) app-local + +vmctl-race: + APP_NAME=vmctl RACE=-race $(MAKE) app-local + +vmctl-prod: + APP_NAME=vmctl $(MAKE) app-via-docker + +vmctl-pure-prod: + APP_NAME=vmctl $(MAKE) app-via-docker-pure + +vmctl-amd64-prod: + APP_NAME=vmctl $(MAKE) app-via-docker-amd64 + +vmctl-arm-prod: + APP_NAME=vmctl $(MAKE) app-via-docker-arm + +vmctl-arm64-prod: + APP_NAME=vmctl $(MAKE) app-via-docker-arm64 + +vmctl-ppc64le-prod: + APP_NAME=vmctl $(MAKE) app-via-docker-ppc64le + +vmctl-386-prod: + APP_NAME=vmctl $(MAKE) app-via-docker-386 + +package-vmctl: + APP_NAME=vmctl $(MAKE) package-via-docker + +package-vmctl-pure: + APP_NAME=vmctl $(MAKE) package-via-docker-pure + +package-vmctl-amd64: + APP_NAME=vmctl $(MAKE) package-via-docker-amd64 + +package-vmctl-arm: + APP_NAME=vmctl $(MAKE) package-via-docker-arm + +package-vmctl-arm64: + APP_NAME=vmctl $(MAKE) package-via-docker-arm64 + +package-vmctl-ppc64le: + APP_NAME=vmctl $(MAKE) package-via-docker-ppc64le + +package-vmctl-386: + APP_NAME=vmctl $(MAKE) package-via-docker-386 + +publish-vmctl: + APP_NAME=vmctl $(MAKE) publish-via-docker + +vmctl-amd64: + CGO_ENABLED=1 GOARCH=amd64 $(MAKE) vmctl-local-with-goarch + +vmctl-arm: + CGO_ENABLED=0 GOARCH=arm $(MAKE) vmctl-local-with-goarch + +vmctl-arm64: + CGO_ENABLED=0 GOARCH=arm64 $(MAKE) vmctl-local-with-goarch + +vmctl-ppc64le: + CGO_ENABLED=0 GOARCH=ppc64le $(MAKE) vmctl-local-with-goarch + +vmctl-386: + CGO_ENABLED=0 GOARCH=386 $(MAKE) vmctl-local-with-goarch + +vmctl-local-with-goarch: + APP_NAME=vmctl $(MAKE) app-local-with-goarch + +vmctl-pure: + APP_NAME=vmctl $(MAKE) app-local-pure diff --git a/app/vmctl/README.md b/app/vmctl/README.md new file mode 100644 index 000000000..f4f315c75 --- /dev/null +++ b/app/vmctl/README.md @@ -0,0 +1,427 @@ +# vmctl - Victoria metrics command-line tool + +Features: +- [x] Prometheus: migrate data from Prometheus to VictoriaMetrics using snapshot API +- [x] Thanos: migrate data from Thanos to VictoriaMetrics +- [ ] ~~Prometheus: migrate data from Prometheus to VictoriaMetrics by query~~(discarded) +- [x] InfluxDB: migrate data from InfluxDB to VictoriaMetrics +- [ ] Storage Management: data re-balancing between nodes + +# Table of contents + +* [Articles](#articles) +* [How to build](#how-to-build) +* [Migrating data from InfluxDB 1.x](#migrating-data-from-influxdb-1x) + * [Data mapping](#data-mapping) + * [Configuration](#configuration) + * [Filtering](#filtering) +* [Migrating data from InfluxDB 2.x](#migrating-data-from-influxdb-2x) +* [Migrating data from Prometheus](#migrating-data-from-prometheus) + * [Data mapping](#data-mapping-1) + * [Configuration](#configuration-1) + * [Filtering](#filtering-1) +* [Migrating data from Thanos](#migrating-data-from-thanos) + * [Current data](#current-data) + * [Historical data](#historical-data) +* [Migrating data from VictoriaMetrics](#migrating-data-from-victoriametrics) + * [Native protocol](#native-protocol) +* [Tuning](#tuning) + * [Influx mode](#influx-mode) + * [Prometheus mode](#prometheus-mode) + * [VictoriaMetrics importer](#victoriametrics-importer) + * [Importer stats](#importer-stats) +* [Significant figures](#significant-figures) +* [Adding extra labels](#adding-extra-labels) + + +## Articles + +* [How to migrate data from Prometheus](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-d44a6728f043) +* [How to migrate data from Prometheus. 
Filtering and modifying time series](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-filtering-and-modifying-time-series-6d40cea4bf21) + +## How to build + +1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.12. +2. Run `make build` from the root folder of the repository. + It builds `vmctl` binary and puts it into the `bin` folder. + +## Migrating data from InfluxDB (1.x) + +`vmctl` supports the `influx` mode to migrate data from InfluxDB to VictoriaMetrics time-series database. + +See `./vmctl influx --help` for details and full list of flags. + +To use migration tool please specify the InfluxDB address `--influx-addr`, the database `--influx-database` and VictoriaMetrics address `--vm-addr`. +Flag `--vm-addr` for single-node VM is usually equal to `--httpListenAddr`, and for cluster version +is equal to `--httpListenAddr` flag of VMInsert component. Please note, that vmctl performs initial readiness check for the given address +by checking `/health` endpoint. For cluster version it is additionally required to specify the `--vm-account-id` flag. +See more details for cluster version [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster). + +As soon as required flags are provided and all endpoints are accessible, `vmctl` will start the InfluxDB scheme exploration. +Basically, it just fetches all fields and timeseries from the provided database and builds up registry of all available timeseries. +Then `vmctl` sends fetch requests for each timeseries to InfluxDB one by one and pass results to VM importer. +VM importer then accumulates received samples in batches and sends import requests to VM. + +The importing process example for local installation of InfluxDB(`http://localhost:8086`) +and single-node VictoriaMetrics(`http://localhost:8428`): +``` +./vmctl influx --influx-database benchmark +InfluxDB import mode +2020/01/18 20:47:11 Exploring scheme for database "benchmark" +2020/01/18 20:47:11 fetching fields: command: "show field keys"; database: "benchmark"; retention: "autogen" +2020/01/18 20:47:11 found 10 fields +2020/01/18 20:47:11 fetching series: command: "show series "; database: "benchmark"; retention: "autogen" +Found 40000 timeseries to import. Continue? [Y/n] y +40000 / 40000 [-----------------------------------------------------------------------------------------------------------------------------------------------] 100.00% 21 p/s +2020/01/18 21:19:00 Import finished! +2020/01/18 21:19:00 VictoriaMetrics importer stats: + idle duration: 13m51.461434876s; + time spent while importing: 17m56.923899847s; + total samples: 345600000; + samples/s: 320914.04; + total bytes: 5.9 GB; + bytes/s: 5.4 MB; + import requests: 40001; +2020/01/18 21:19:00 Total time: 31m48.467044016s +``` + +### Data mapping + +Vmctl maps Influx data the same way as VictoriaMetrics does by using the following rules: + +* `influx-database` arg is mapped into `db` label value unless `db` tag exists in the Influx line. +* Field names are mapped to time series names prefixed with {measurement}{separator} value, +where {separator} equals to _ by default. +It can be changed with `--influx-measurement-field-separator` command-line flag. +* Field values are mapped to time series values. +* Tags are mapped to Prometheus labels format as-is. 
+
+For example, the following Influx line:
+```
+foo,tag1=value1,tag2=value2 field1=12,field2=40
+```
+
+is converted into the following Prometheus format data points:
+```
+foo_field1{tag1="value1", tag2="value2"} 12
+foo_field2{tag1="value1", tag2="value2"} 40
+```
+
+### Configuration
+
+The configuration flags should contain self-explanatory descriptions.
+
+### Filtering
+
+The filtering consists of two parts: timeseries and time.
+The first step is to select all available timeseries
+for the given database and retention policy. An additional filtering
+condition may be specified via the `--influx-filter-series` flag. For example:
+```
+./vmctl influx --influx-database benchmark \
+  --influx-filter-series "on benchmark from cpu where hostname='host_1703'"
+InfluxDB import mode
+2020/01/26 14:23:29 Exploring scheme for database "benchmark"
+2020/01/26 14:23:29 fetching fields: command: "show field keys"; database: "benchmark"; retention: "autogen"
+2020/01/26 14:23:29 found 12 fields
+2020/01/26 14:23:29 fetching series: command: "show series on benchmark from cpu where hostname='host_1703'"; database: "benchmark"; retention: "autogen"
+Found 10 timeseries to import. Continue? [Y/n]
+```
+The timeseries select query would be the following:
+ `fetching series: command: "show series on benchmark from cpu where hostname='host_1703'"; database: "benchmark"; retention: "autogen"`
+
+The second step of filtering is the time filter, which is applied when fetching the datapoints from Influx.
+Time filtering may be configured with two flags:
+* `--influx-filter-time-start`
+* `--influx-filter-time-end`
+Here's an example of importing timeseries for one day only:
+`./vmctl influx --influx-database benchmark --influx-filter-series "where hostname='host_1703'" --influx-filter-time-start "2020-01-01T10:07:00Z" --influx-filter-time-end "2020-01-01T15:07:00Z"`
+
+Please see more about time filtering [here](https://docs.influxdata.com/influxdb/v1.7/query_language/schema_exploration#filter-meta-queries-by-time).
+
+## Migrating data from InfluxDB (2.x)
+
+Migrating data from InfluxDB v2.x is not supported yet ([#32](https://github.com/VictoriaMetrics/vmctl/issues/32)).
+You may find this third-party solution useful: https://github.com/jonppe/influx_to_victoriametrics.
+
+
+## Migrating data from Prometheus
+
+`vmctl` supports the `prometheus` mode for migrating data from Prometheus to the VictoriaMetrics time-series database.
+Migration is based on reading a Prometheus snapshot, which is basically a set of hard links to the Prometheus data files.
+
+See `./vmctl prometheus --help` for details and the full list of flags.
+
+To use the migration tool, please specify the path to the Prometheus snapshot via `--prom-snapshot` and the VictoriaMetrics address via `--vm-addr`.
+More about Prometheus snapshots may be found [here](https://www.robustperception.io/taking-snapshots-of-prometheus-data).
+The `--vm-addr` flag for single-node VM is usually equal to `--httpListenAddr`, and for the cluster version
+it is equal to the `--httpListenAddr` flag of the VMInsert component. Please note that vmctl performs an initial readiness check for the given address
+by checking the `/health` endpoint. For the cluster version it is additionally required to specify the `--vm-account-id` flag.
+See more details for the cluster version [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
+
+As soon as the required flags are provided and all endpoints are accessible, `vmctl` will start the Prometheus snapshot exploration.
+Basically, it just fetches all available blocks in the provided snapshot and reads their metadata. It also does initial filtering by time
+if the flags `--prom-filter-time-start` or `--prom-filter-time-end` were set. The exploration procedure prints some stats from the read blocks.
+Please note that these stats do not take into account timeseries or samples filtering. This will be done during the importing process.
+
+The importing process takes the snapshot blocks revealed by the Explore procedure and processes them one by one,
+accumulating timeseries and samples. Please note that `vmctl` relies on the responses of the Explore queries at this stage,
+so ensure that the Explore queries are executed without errors or limits. Please see this
+[issue](https://github.com/VictoriaMetrics/vmctl/issues/30) for details.
+The data is processed in chunks and then sent to VM.
+
+The importing process example for a local installation of Prometheus
+and single-node VictoriaMetrics(`http://localhost:8428`):
+```
+./vmctl prometheus --prom-snapshot=/path/to/snapshot \
+  --vm-concurrency=1 \
+  --vm-batch-size=200000 \
+  --prom-concurrency=3
+Prometheus import mode
+Prometheus snapshot stats:
+  blocks found: 14;
+  blocks skipped: 0;
+  min time: 1581288163058 (2020-02-09T22:42:43Z);
+  max time: 1582409128139 (2020-02-22T22:05:28Z);
+  samples: 32549106;
+  series: 27289.
+Found 14 blocks to import. Continue? [Y/n] y
+14 / 14 [-------------------------------------------------------------------------------------------] 100.00% 0 p/s
+2020/02/23 15:50:03 Import finished!
+2020/02/23 15:50:03 VictoriaMetrics importer stats:
+  idle duration: 6.152953029s;
+  time spent while importing: 44.908522491s;
+  total samples: 32549106;
+  samples/s: 724786.84;
+  total bytes: 669.1 MB;
+  bytes/s: 14.9 MB;
+  import requests: 323;
+  import requests retries: 0;
+2020/02/23 15:50:03 Total time: 51.077451066s
+```
+
+### Data mapping
+
+VictoriaMetrics has a very similar data model to Prometheus and supports [RemoteWrite integration](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
+So no data changes will be applied.
+
+### Configuration
+
+The configuration flags should contain self-explanatory descriptions.
+
+### Filtering
+
+The filtering consists of two parts: by timeseries and by time.
+
+Filtering by time may be configured via the flags `--prom-filter-time-start` and `--prom-filter-time-end`
+in RFC3339 format. This filter is applied twice: to drop blocks out of range and to filter timeseries in blocks with
+overlapping time range.
+
+Example of applying the time filter:
+```
+./vmctl prometheus --prom-snapshot=/path/to/snapshot \
+  --prom-filter-time-start=2020-02-07T00:07:01Z \
+  --prom-filter-time-end=2020-02-11T00:07:01Z
+Prometheus import mode
+Prometheus snapshot stats:
+  blocks found: 2;
+  blocks skipped: 12;
+  min time: 1581288163058 (2020-02-09T22:42:43Z);
+  max time: 1581328800000 (2020-02-10T10:00:00Z);
+  samples: 1657698;
+  series: 3930.
+Found 2 blocks to import. Continue? [Y/n] y
+```
+
+Please notice that the total amount of blocks in the provided snapshot is 14, but only 2 of them were in the provided
+time range. So the other 12 blocks were marked as `skipped`. The amount of samples and series is not taken into account,
+since this is a heavy operation and will be done during the import process.
+
+
+Filtering by timeseries is configured with the following flags:
+* `--prom-filter-label` - the label name, e.g. `__name__` or `instance`;
+* `--prom-filter-label-value` - the regular expression to filter the label value.
By default matches all `.*` + +For example: +``` +./vmctl prometheus --prom-snapshot=/path/to/snapshot \ + --prom-filter-label="__name__" \ + --prom-filter-label-value="promhttp.*" \ + --prom-filter-time-start=2020-02-07T00:07:01Z \ + --prom-filter-time-end=2020-02-11T00:07:01Z +Prometheus import mode +Prometheus snapshot stats: + blocks found: 2; + blocks skipped: 12; + min time: 1581288163058 (2020-02-09T22:42:43Z); + max time: 1581328800000 (2020-02-10T10:00:00Z); + samples: 1657698; + series: 3930. +Found 2 blocks to import. Continue? [Y/n] y +14 / 14 [------------------------------------------------------------------------------------------------------------------------------------------------------] 100.00% ? p/s +2020/02/23 15:51:07 Import finished! +2020/02/23 15:51:07 VictoriaMetrics importer stats: + idle duration: 0s; + time spent while importing: 37.415461ms; + total samples: 10128; + samples/s: 270690.24; + total bytes: 195.2 kB; + bytes/s: 5.2 MB; + import requests: 2; + import requests retries: 0; +2020/02/23 15:51:07 Total time: 7.153158218s +``` + +## Migrating data from Thanos + +Thanos uses the same storage engine as Prometheus and the data layout on-disk should be the same. That means +`vmctl` in mode `prometheus` may be used for Thanos historical data migration as well. +These instructions may vary based on the details of your Thanos configuration. +Please read carefully and verify as you go. We assume you're using Thanos Sidecar on your Prometheus pods, +and that you have a separate Thanos Store installation. + +### Current data + +1. For now, keep your Thanos Sidecar and Thanos-related Prometheus configuration, but add this to also stream + metrics to VictoriaMetrics: + ``` + remote_write: + - url: http://victoria-metrics:8428/api/v1/write + ``` +2. Make sure VM is running, of course. Now check the logs to make sure that Prometheus is sending and VM is receiving. + In Prometheus, make sure there are no errors. On the VM side, you should see messages like this: + ``` + 2020-04-27T18:38:46.474Z info VictoriaMetrics/lib/storage/partition.go:207 creating a partition "2020_04" with smallPartsPath="/victoria-metrics-data/data/small/2020_04", bigPartsPath="/victoria-metrics-data/data/big/2020_04" + 2020-04-27T18:38:46.506Z info VictoriaMetrics/lib/storage/partition.go:222 partition "2020_04" has been created + ``` +3. Now just wait. Within two hours, Prometheus should finish its current data file and hand it off to Thanos Store for long term + storage. + +### Historical data + +Let's assume your data is stored on S3 served by minio. You first need to copy that out to a local filesystem, +then import it into VM using `vmctl` in `prometheus` mode. +1. Copy data from minio. + 1. Run the `minio/mc` Docker container. + 1. `mc config host add minio http://minio:9000 accessKey secretKey`, substituting appropriate values for the last 3 items. + 1. `mc cp -r minio/prometheus thanos-data` +1. Import using `vmctl`. + 1. Follow the [instructions](#how-to-build) to compile `vmctl` on your machine. + 1. 
Use [prometheus](#migrating-data-from-prometheus) mode to import data:
+    ```
+    vmctl prometheus --prom-snapshot thanos-data --vm-addr http://victoria-metrics:8428
+    ```
+
+## Migrating data from VictoriaMetrics
+
+### Native protocol
+
+The [native binary protocol](https://victoriametrics.github.io/#how-to-export-data-in-native-format)
+was introduced in the [1.42.0 release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.42.0)
+and provides the most efficient way to migrate data between VM instances: single to single, cluster to cluster,
+single to cluster and vice versa. Please note that both instances (source and destination) should be v1.42.0
+or higher.
+
+See `./vmctl vm-native --help` for details and the full list of flags.
+
+In this mode `vmctl` acts as a proxy between two VM instances, where time series filtering is done by the "source" (`src`)
+and processing is done by the "destination" (`dst`). Because of that, `vmctl` doesn't actually know how much data will be
+processed and can't show the progress bar. It will show the current processing speed and the total number of processed bytes:
+
+```
+./vmctl vm-native --vm-native-src-addr=http://localhost:8528 \
+  --vm-native-dst-addr=http://localhost:8428 \
+  --vm-native-filter-match='{job="vmagent"}' \
+  --vm-native-filter-time-start='2020-01-01T20:07:00Z'
+VictoriaMetrics Native import mode
+Initing export pipe from "http://localhost:8528" with filters:
+  filter: match[]={job="vmagent"}
+Initing import process to "http://localhost:8428":
+Total: 336.75 KiB ↖ Speed: 454.46 KiB p/s
+2020/10/13 17:04:59 Total time: 952.143376ms
+```
+
+Importing tips:
+1. Migrating all the metrics from one VM to another may collide with existing application metrics
+(prefixed with `vm_`) at the destination and lead to confusion when using
+[official Grafana dashboards](https://grafana.com/orgs/victoriametrics/dashboards).
+To avoid such a situation, try to filter out VM process metrics via the `--vm-native-filter-match` flag.
+2. Migration is a backfilling process, so it is recommended to read the
+[Backfilling tips](https://github.com/VictoriaMetrics/VictoriaMetrics#backfilling) section.
+3. `vmctl` doesn't provide relabeling or other types of label management in this mode.
+Instead, use [relabeling in VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl/issues/4#issuecomment-683424375).
+
+
+## Tuning
+
+### Influx mode
+
+The flag `--influx-concurrency` controls how many concurrent requests may be sent to InfluxDB while fetching
+timeseries. Please set it wisely to avoid overwhelming InfluxDB.
+
+The flag `--influx-chunk-size` controls the max number of datapoints to return in a single chunk from fetch requests.
+Please see more details [here](https://docs.influxdata.com/influxdb/v1.7/guides/querying_data/#chunking).
+The chunk size is used to control InfluxDB memory usage, so it won't OOM when processing large timeseries with
+billions of datapoints.
+
+### Prometheus mode
+
+The flag `--prom-concurrency` controls how many concurrent readers will be reading the blocks in the snapshot.
+Since snapshots are just files on disk, it would be hard to overwhelm the system. Please go with a value equal
+to the number of free CPU cores.
+
+### VictoriaMetrics importer
+
+The flag `--vm-concurrency` controls the number of concurrent workers that process the input from InfluxDB query results.
+Please note that each import request can load up to a single vCPU core on VictoriaMetrics. So try to set it according
+to the allocated CPU resources of your VictoriaMetrics installation.
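+
+For illustration only (the concurrency values below are assumptions, not recommendations), a Prometheus snapshot import
+tuned for a VictoriaMetrics instance with 4 available vCPUs might combine the flags described above as follows:
+```
+./vmctl prometheus --prom-snapshot=/path/to/snapshot \
+  --prom-concurrency=4 \
+  --vm-addr=http://localhost:8428 \
+  --vm-concurrency=4
+```
+Start with conservative values and adjust them while watching the importer stats described below.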
+
+The flag `--vm-batch-size` controls the max number of samples collected before sending the import request.
+For example, if `--influx-chunk-size=500` and `--vm-batch-size=2000`, then the importer will process no more
+than 4 chunks before sending the request.
+
+### Importer stats
+
+After a successful import, `vmctl` prints some statistics.
+The important numbers to watch are the following:
+ - `idle duration` - shows the time the importer spent while waiting for data from InfluxDB/Prometheus
+to fill up the `--vm-batch-size` batch size. The value shows the total duration across all workers configured
+via `--vm-concurrency`. A high value may be a sign of too slow InfluxDB/Prometheus fetches or of a too
+high `--vm-concurrency` value. Try to improve it by increasing the `--influx-concurrency`/`--prom-concurrency` value or
+decreasing the `--vm-concurrency` value.
+- `import requests` - shows how many import requests were issued to the VM server.
+The import request is issued once the batch size (`--vm-batch-size`) is full and ready to be sent.
+Please prefer big batch sizes (50k-500k) to improve performance.
+- `import requests retries` - shows the number of unsuccessful import requests. A non-zero value may be
+a sign of network issues or of VM being overloaded. See the logs during import for error messages.
+
+### Silent mode
+
+By default `vmctl` waits for confirmation from the user before starting the import. If this is unwanted
+behavior and no user interaction is required - pass the `-s` flag to enable "silent" mode:
+```
+ -s Whether to run in silent mode. If set to true no confirmation prompts will appear. (default: false)
+```
+
+### Significant figures
+
+`vmctl` allows limiting the number of [significant figures](https://en.wikipedia.org/wiki/Significant_figures)
+before importing. For example, the average value for response size is `102.342305` bytes and it has 9 significant figures.
+If you ask a human to pronounce this value, then with high probability the value will be rounded to the first 4 or 5 figures
+because the rest aren't really that important to mention. In most cases, such a high precision is too much.
+Moreover, such values may be just a result of [floating point arithmetic](https://en.wikipedia.org/wiki/Floating-point_arithmetic),
+create a [false precision](https://en.wikipedia.org/wiki/False_precision) and result in a bad compression ratio
+according to [information theory](https://en.wikipedia.org/wiki/Information_theory).
+
+The `--vm-significant-figures` flag allows limiting the number of significant figures. It has no effect if set
+to 0 (the default), but with `--vm-significant-figures=5` the value `102.342305` will be rounded to `102.34`. Such a value will
+have a much higher compression ratio compared to the previous one and will save some extra disk space after the migration.
+The most common case for using this flag is to reduce the number of significant figures for time series storing aggregation
+results such as `average`, `rate`, etc.
+
+### Adding extra labels
+
+`vmctl` allows adding extra labels to all imported series. It can be achieved with the flag `--vm-extra-label label=value`.
+If multiple labels need to be added, set the flag for each label, for example, `--vm-extra-label label1=value1 --vm-extra-label label2=value2`.
+If a timeseries already has a label that is also added with the `--vm-extra-label` flag, the flag has priority and will override the label value from the timeseries.
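+
+As an illustrative sketch only (the addresses and the label value are assumptions), extra labels can be combined with the
+`vm-native` mode documented above to tag all migrated series with their origin:
+```
+./vmctl vm-native --vm-native-src-addr=http://localhost:8528 \
+  --vm-native-dst-addr=http://localhost:8428 \
+  --vm-native-filter-match='{job="vmagent"}' \
+  --vm-extra-label source=old-vm
+```
+Here every imported series gets an additional `source="old-vm"` label, which makes it easy to distinguish the migrated data later.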
+ \ No newline at end of file diff --git a/app/vmctl/deployment/Dockerfile b/app/vmctl/deployment/Dockerfile new file mode 100644 index 000000000..27b442b01 --- /dev/null +++ b/app/vmctl/deployment/Dockerfile @@ -0,0 +1,6 @@ +ARG base_image +FROM $base_image + +ENTRYPOINT ["/vmctl-prod"] +ARG src_binary +COPY $src_binary ./vmctl-prod diff --git a/app/vmctl/flags.go b/app/vmctl/flags.go new file mode 100644 index 000000000..9501286f8 --- /dev/null +++ b/app/vmctl/flags.go @@ -0,0 +1,284 @@ +package main + +import ( + "fmt" + + "github.com/urfave/cli/v2" +) + +const ( + globalSilent = "s" +) + +var ( + globalFlags = []cli.Flag{ + &cli.BoolFlag{ + Name: globalSilent, + Value: false, + Usage: "Whether to run in silent mode. If set to true no confirmation prompts will appear.", + }, + } +) + +const ( + vmAddr = "vm-addr" + vmUser = "vm-user" + vmPassword = "vm-password" + vmAccountID = "vm-account-id" + vmConcurrency = "vm-concurrency" + vmCompress = "vm-compress" + vmBatchSize = "vm-batch-size" + vmSignificantFigures = "vm-significant-figures" + vmExtraLabel = "vm-extra-label" +) + +var ( + vmFlags = []cli.Flag{ + &cli.StringFlag{ + Name: vmAddr, + Value: "http://localhost:8428", + Usage: "VictoriaMetrics address to perform import requests. \n" + + "Should be the same as --httpListenAddr value for single-node version or VMInsert component. \n" + + "Please note, that `vmctl` performs initial readiness check for the given address by checking `/health` endpoint.", + }, + &cli.StringFlag{ + Name: vmUser, + Usage: "VictoriaMetrics username for basic auth", + EnvVars: []string{"VM_USERNAME"}, + }, + &cli.StringFlag{ + Name: vmPassword, + Usage: "VictoriaMetrics password for basic auth", + EnvVars: []string{"VM_PASSWORD"}, + }, + &cli.StringFlag{ + Name: vmAccountID, + Usage: "AccountID is an arbitrary 32-bit integer identifying namespace for data ingestion (aka tenant). \n" + + "It is possible to set it as accountID:projectID, where projectID is also arbitrary 32-bit integer. \n" + + "If projectID isn't set, then it equals to 0", + }, + &cli.UintFlag{ + Name: vmConcurrency, + Usage: "Number of workers concurrently performing import requests to VM", + Value: 2, + }, + &cli.BoolFlag{ + Name: vmCompress, + Value: true, + Usage: "Whether to apply gzip compression to import requests", + }, + &cli.IntFlag{ + Name: vmBatchSize, + Value: 200e3, + Usage: "How many samples importer collects before sending the import request to VM", + }, + &cli.IntFlag{ + Name: vmSignificantFigures, + Value: 0, + Usage: "The number of significant figures to leave in metric values before importing. " + + "See https://en.wikipedia.org/wiki/Significant_figures. Zero value saves all the significant figures. " + + "This option may be used for increasing on-disk compression level for the stored metrics", + }, + &cli.StringSliceFlag{ + Name: vmExtraLabel, + Value: nil, + Usage: "Extra labels, that will be added to imported timeseries. In case of collision, label value defined by flag" + + "will have priority. 
Flag can be set multiple times, to add few additional labels.", + }, + } +) + +const ( + influxAddr = "influx-addr" + influxUser = "influx-user" + influxPassword = "influx-password" + influxDB = "influx-database" + influxRetention = "influx-retention-policy" + influxChunkSize = "influx-chunk-size" + influxConcurrency = "influx-concurrency" + influxFilterSeries = "influx-filter-series" + influxFilterTimeStart = "influx-filter-time-start" + influxFilterTimeEnd = "influx-filter-time-end" + influxMeasurementFieldSeparator = "influx-measurement-field-separator" +) + +var ( + influxFlags = []cli.Flag{ + &cli.StringFlag{ + Name: influxAddr, + Value: "http://localhost:8086", + Usage: "Influx server addr", + }, + &cli.StringFlag{ + Name: influxUser, + Usage: "Influx user", + EnvVars: []string{"INFLUX_USERNAME"}, + }, + &cli.StringFlag{ + Name: influxPassword, + Usage: "Influx user password", + EnvVars: []string{"INFLUX_PASSWORD"}, + }, + &cli.StringFlag{ + Name: influxDB, + Usage: "Influx database", + Required: true, + }, + &cli.StringFlag{ + Name: influxRetention, + Usage: "Influx retention policy", + Value: "autogen", + }, + &cli.IntFlag{ + Name: influxChunkSize, + Usage: "The chunkSize defines max amount of series to be returned in one chunk", + Value: 10e3, + }, + &cli.IntFlag{ + Name: influxConcurrency, + Usage: "Number of concurrently running fetch queries to InfluxDB", + Value: 1, + }, + &cli.StringFlag{ + Name: influxFilterSeries, + Usage: "Influx filter expression to select series. E.g. \"from cpu where arch='x86' AND hostname='host_2753'\".\n" + + "See for details https://docs.influxdata.com/influxdb/v1.7/query_language/schema_exploration#show-series", + }, + &cli.StringFlag{ + Name: influxFilterTimeStart, + Usage: "The time filter to select timeseries with timestamp equal or higher than provided value. E.g. '2020-01-01T20:07:00Z'", + }, + &cli.StringFlag{ + Name: influxFilterTimeEnd, + Usage: "The time filter to select timeseries with timestamp equal or lower than provided value. E.g. '2020-01-01T20:07:00Z'", + }, + &cli.StringFlag{ + Name: influxMeasurementFieldSeparator, + Usage: "The {separator} symbol used to concatenate {measurement} and {field} names into series name {measurement}{separator}{field}.", + Value: "_", + }, + } +) + +const ( + promSnapshot = "prom-snapshot" + promConcurrency = "prom-concurrency" + promFilterTimeStart = "prom-filter-time-start" + promFilterTimeEnd = "prom-filter-time-end" + promFilterLabel = "prom-filter-label" + promFilterLabelValue = "prom-filter-label-value" +) + +var ( + promFlags = []cli.Flag{ + &cli.StringFlag{ + Name: promSnapshot, + Usage: "Path to Prometheus snapshot. Pls see for details https://www.robustperception.io/taking-snapshots-of-prometheus-data", + Required: true, + }, + &cli.IntFlag{ + Name: promConcurrency, + Usage: "Number of concurrently running snapshot readers", + Value: 1, + }, + &cli.StringFlag{ + Name: promFilterTimeStart, + Usage: "The time filter in RFC3339 format to select timeseries with timestamp equal or higher than provided value. E.g. '2020-01-01T20:07:00Z'", + }, + &cli.StringFlag{ + Name: promFilterTimeEnd, + Usage: "The time filter in RFC3339 format to select timeseries with timestamp equal or lower than provided value. E.g. '2020-01-01T20:07:00Z'", + }, + &cli.StringFlag{ + Name: promFilterLabel, + Usage: "Prometheus label name to filter timeseries by. E.g. 
'__name__' will filter timeseries by name.", + }, + &cli.StringFlag{ + Name: promFilterLabelValue, + Usage: fmt.Sprintf("Prometheus regular expression to filter label from %q flag.", promFilterLabel), + Value: ".*", + }, + } +) + +const ( + vmNativeFilterMatch = "vm-native-filter-match" + vmNativeFilterTimeStart = "vm-native-filter-time-start" + vmNativeFilterTimeEnd = "vm-native-filter-time-end" + + vmNativeSrcAddr = "vm-native-src-addr" + vmNativeSrcUser = "vm-native-src-user" + vmNativeSrcPassword = "vm-native-src-password" + + vmNativeDstAddr = "vm-native-dst-addr" + vmNativeDstUser = "vm-native-dst-user" + vmNativeDstPassword = "vm-native-dst-password" +) + +var ( + vmNativeFlags = []cli.Flag{ + &cli.StringFlag{ + Name: vmNativeFilterMatch, + Usage: "Time series selector to match series for export. For example, select {instance!=\"localhost\"} will " + + "match all series with \"instance\" label different to \"localhost\".\n" + + " See more details here https://github.com/VictoriaMetrics/VictoriaMetrics#how-to-export-data-in-native-format", + Value: `{__name__!=""}`, + }, + &cli.StringFlag{ + Name: vmNativeFilterTimeStart, + Usage: "The time filter may contain either unix timestamp in seconds or RFC3339 values. E.g. '2020-01-01T20:07:00Z'", + }, + &cli.StringFlag{ + Name: vmNativeFilterTimeEnd, + Usage: "The time filter may contain either unix timestamp in seconds or RFC3339 values. E.g. '2020-01-01T20:07:00Z'", + }, + &cli.StringFlag{ + Name: vmNativeSrcAddr, + Usage: "VictoriaMetrics address to perform export from. \n" + + " Should be the same as --httpListenAddr value for single-node version or VMSelect component." + + " If exporting from cluster version - include the tenet token in address.", + Required: true, + }, + &cli.StringFlag{ + Name: vmNativeSrcUser, + Usage: "VictoriaMetrics username for basic auth", + EnvVars: []string{"VM_NATIVE_SRC_USERNAME"}, + }, + &cli.StringFlag{ + Name: vmNativeSrcPassword, + Usage: "VictoriaMetrics password for basic auth", + EnvVars: []string{"VM_NATIVE_SRC_PASSWORD"}, + }, + &cli.StringFlag{ + Name: vmNativeDstAddr, + Usage: "VictoriaMetrics address to perform import to. \n" + + " Should be the same as --httpListenAddr value for single-node version or VMInsert component." + + " If importing into cluster version - include the tenet token in address.", + Required: true, + }, + &cli.StringFlag{ + Name: vmNativeDstUser, + Usage: "VictoriaMetrics username for basic auth", + EnvVars: []string{"VM_NATIVE_DST_USERNAME"}, + }, + &cli.StringFlag{ + Name: vmNativeDstPassword, + Usage: "VictoriaMetrics password for basic auth", + EnvVars: []string{"VM_NATIVE_DST_PASSWORD"}, + }, + &cli.StringSliceFlag{ + Name: vmExtraLabel, + Value: nil, + Usage: "Extra labels, that will be added to imported timeseries. In case of collision, label value defined by flag" + + "will have priority. Flag can be set multiple times, to add few additional labels.", + }, + } +) + +func mergeFlags(flags ...[]cli.Flag) []cli.Flag { + var result []cli.Flag + for _, f := range flags { + result = append(result, f...) 
+ } + return result +} diff --git a/app/vmctl/influx.go b/app/vmctl/influx.go new file mode 100644 index 000000000..032f1f639 --- /dev/null +++ b/app/vmctl/influx.go @@ -0,0 +1,144 @@ +package main + +import ( + "fmt" + "io" + "log" + "sync" + + "github.com/cheggaaa/pb/v3" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm" +) + +type influxProcessor struct { + ic *influx.Client + im *vm.Importer + cc int + separator string +} + +func newInfluxProcessor(ic *influx.Client, im *vm.Importer, cc int, separator string) *influxProcessor { + if cc < 1 { + cc = 1 + } + return &influxProcessor{ + ic: ic, + im: im, + cc: cc, + separator: separator, + } +} + +func (ip *influxProcessor) run(silent bool) error { + series, err := ip.ic.Explore() + if err != nil { + return fmt.Errorf("explore query failed: %s", err) + } + if len(series) < 1 { + return fmt.Errorf("found no timeseries to import") + } + + question := fmt.Sprintf("Found %d timeseries to import. Continue?", len(series)) + if !silent && !prompt(question) { + return nil + } + + bar := pb.StartNew(len(series)) + seriesCh := make(chan *influx.Series) + errCh := make(chan error) + ip.im.ResetStats() + + var wg sync.WaitGroup + wg.Add(ip.cc) + for i := 0; i < ip.cc; i++ { + go func() { + defer wg.Done() + for s := range seriesCh { + if err := ip.do(s); err != nil { + errCh <- fmt.Errorf("request failed for %q.%q: %s", s.Measurement, s.Field, err) + return + } + bar.Increment() + } + }() + } + + // any error breaks the import + for _, s := range series { + select { + case infErr := <-errCh: + return fmt.Errorf("influx error: %s", infErr) + case vmErr := <-ip.im.Errors(): + return fmt.Errorf("Import process failed: \n%s", wrapErr(vmErr)) + case seriesCh <- s: + } + } + + close(seriesCh) + wg.Wait() + ip.im.Close() + // drain import errors channel + for vmErr := range ip.im.Errors() { + return fmt.Errorf("Import process failed: \n%s", wrapErr(vmErr)) + } + bar.Finish() + log.Println("Import finished!") + log.Print(ip.im.Stats()) + return nil +} + +const dbLabel = "db" + +func (ip *influxProcessor) do(s *influx.Series) error { + cr, err := ip.ic.FetchDataPoints(s) + if err != nil { + return fmt.Errorf("failed to fetch datapoints: %s", err) + } + defer cr.Close() + var name string + if s.Measurement != "" { + name = fmt.Sprintf("%s%s%s", s.Measurement, ip.separator, s.Field) + } else { + name = s.Field + } + + labels := make([]vm.LabelPair, len(s.LabelPairs)) + var containsDBLabel bool + for i, lp := range s.LabelPairs { + if lp.Name == dbLabel { + containsDBLabel = true + break + } + labels[i] = vm.LabelPair{ + Name: lp.Name, + Value: lp.Value, + } + } + if !containsDBLabel { + labels = append(labels, vm.LabelPair{ + Name: dbLabel, + Value: ip.ic.Database(), + }) + } + + for { + time, values, err := cr.Next() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + // skip empty results + if len(time) < 1 { + continue + } + ip.im.Input() <- &vm.TimeSeries{ + Name: name, + LabelPairs: labels, + Timestamps: time, + Values: values, + } + } +} diff --git a/app/vmctl/influx/influx.go b/app/vmctl/influx/influx.go new file mode 100644 index 000000000..9e68b741a --- /dev/null +++ b/app/vmctl/influx/influx.go @@ -0,0 +1,360 @@ +package influx + +import ( + "fmt" + "io" + "log" + "strings" + "time" + + influx "github.com/influxdata/influxdb/client/v2" +) + +// Client represents a wrapper over +// influx HTTP client +type Client struct { + influx.Client + + database string + 
retention string + chunkSize int + + filterSeries string + filterTime string +} + +// Config contains fields required +// for Client configuration +type Config struct { + Addr string + Username string + Password string + Database string + Retention string + ChunkSize int + + Filter Filter +} + +// Filter contains configuration for filtering +// the timeseries +type Filter struct { + Series string + TimeStart string + TimeEnd string +} + +// Series holds the time series +type Series struct { + Measurement string + Field string + LabelPairs []LabelPair +} + +var valueEscaper = strings.NewReplacer(`\`, `\\`, `'`, `\'`) + +func (s Series) fetchQuery(timeFilter string) string { + f := &strings.Builder{} + fmt.Fprintf(f, "select %q from %q", s.Field, s.Measurement) + if len(s.LabelPairs) > 0 || len(timeFilter) > 0 { + f.WriteString(" where") + } + for i, pair := range s.LabelPairs { + pairV := valueEscaper.Replace(pair.Value) + fmt.Fprintf(f, " %q='%s'", pair.Name, pairV) + if i != len(s.LabelPairs)-1 { + f.WriteString(" and") + } + } + if len(timeFilter) > 0 { + if len(s.LabelPairs) > 0 { + f.WriteString(" and") + } + fmt.Fprintf(f, " %s", timeFilter) + } + return f.String() +} + +// LabelPair is the key-value record +// of time series label +type LabelPair struct { + Name string + Value string +} + +// NewClient creates and returns influx client +// configured with passed Config +func NewClient(cfg Config) (*Client, error) { + c := influx.HTTPConfig{ + Addr: cfg.Addr, + Username: cfg.Username, + Password: cfg.Password, + InsecureSkipVerify: true, + } + hc, err := influx.NewHTTPClient(c) + if err != nil { + return nil, fmt.Errorf("failed to establish conn: %s", err) + } + if _, _, err := hc.Ping(time.Second); err != nil { + return nil, fmt.Errorf("ping failed: %s", err) + } + + chunkSize := cfg.ChunkSize + if chunkSize < 1 { + chunkSize = 10e3 + } + + client := &Client{ + Client: hc, + database: cfg.Database, + retention: cfg.Retention, + chunkSize: chunkSize, + filterTime: timeFilter(cfg.Filter.TimeStart, cfg.Filter.TimeEnd), + filterSeries: cfg.Filter.Series, + } + return client, nil +} + +func (c Client) Database() string { + return c.database +} + +func timeFilter(start, end string) string { + if start == "" && end == "" { + return "" + } + var tf string + if start != "" { + tf = fmt.Sprintf("time >= '%s'", start) + } + if end != "" { + if tf != "" { + tf += " and " + } + tf += fmt.Sprintf("time <= '%s'", end) + } + return tf +} + +// Explore checks the existing data schema in influx +// by checking available fields and series, +// which unique combination represents all possible +// time series existing in database. +// The explore required to reduce the load on influx +// by querying field of the exact time series at once, +// instead of fetching all of the values over and over. +// +// May contain non-existing time series. 
+func (c *Client) Explore() ([]*Series, error) { + log.Printf("Exploring scheme for database %q", c.database) + mFields, err := c.fieldsByMeasurement() + if err != nil { + return nil, fmt.Errorf("failed to get field keys: %s", err) + } + + series, err := c.getSeries() + if err != nil { + return nil, fmt.Errorf("failed to get series: %s", err) + } + + var iSeries []*Series + for _, s := range series { + fields, ok := mFields[s.Measurement] + if !ok { + return nil, fmt.Errorf("can't find field keys for measurement %q", s.Measurement) + } + for _, field := range fields { + is := &Series{ + Measurement: s.Measurement, + Field: field, + LabelPairs: s.LabelPairs, + } + iSeries = append(iSeries, is) + } + } + return iSeries, nil +} + +// ChunkedResponse is a wrapper over influx.ChunkedResponse. +// Used for better memory usage control while iterating +// over huge time series. +type ChunkedResponse struct { + cr *influx.ChunkedResponse + iq influx.Query + field string +} + +func (cr *ChunkedResponse) Close() error { + return cr.cr.Close() +} + +// Next reads the next part/chunk of time series. +// Returns io.EOF when time series was read entirely. +func (cr *ChunkedResponse) Next() ([]int64, []float64, error) { + resp, err := cr.cr.NextResponse() + if err != nil { + return nil, nil, err + } + if resp.Error() != nil { + return nil, nil, fmt.Errorf("response error for %s: %s", cr.iq.Command, resp.Error()) + } + if len(resp.Results) != 1 { + return nil, nil, fmt.Errorf("unexpected number of results in response: %d", len(resp.Results)) + } + results, err := parseResult(resp.Results[0]) + if err != nil { + return nil, nil, err + } + if len(results) < 1 { + return nil, nil, nil + } + r := results[0] + + const key = "time" + timestamps, ok := r.values[key] + if !ok { + return nil, nil, fmt.Errorf("response doesn't contain field %q", key) + } + + fieldValues, ok := r.values[cr.field] + if !ok { + return nil, nil, fmt.Errorf("response doesn't contain filed %q", cr.field) + } + values := make([]float64, len(fieldValues)) + for i, fv := range fieldValues { + v, err := toFloat64(fv) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert value %q.%v to float64: %s", + cr.field, v, err) + } + values[i] = v + } + + ts := make([]int64, len(results[0].values[key])) + for i, v := range timestamps { + t, err := parseDate(v.(string)) + if err != nil { + return nil, nil, err + } + ts[i] = t + } + return ts, values, nil +} + +// FetchDataPoints performs SELECT request to fetch +// datapoints for particular field. 
+func (c *Client) FetchDataPoints(s *Series) (*ChunkedResponse, error) { + iq := influx.Query{ + Command: s.fetchQuery(c.filterTime), + Database: c.database, + RetentionPolicy: c.retention, + Chunked: true, + ChunkSize: 1e4, + } + cr, err := c.QueryAsChunk(iq) + if err != nil { + return nil, fmt.Errorf("query %q err: %s", iq.Command, err) + } + return &ChunkedResponse{cr, iq, s.Field}, nil +} + +func (c *Client) fieldsByMeasurement() (map[string][]string, error) { + q := influx.Query{ + Command: "show field keys", + Database: c.database, + RetentionPolicy: c.retention, + } + log.Printf("fetching fields: %s", stringify(q)) + qValues, err := c.do(q) + if err != nil { + return nil, fmt.Errorf("error while executing query %q: %s", q.Command, err) + } + + var total int + var skipped int + const fKey = "fieldKey" + const fType = "fieldType" + result := make(map[string][]string, len(qValues)) + for _, qv := range qValues { + types := qv.values[fType] + fields := qv.values[fKey] + values := make([]string, 0) + for key, field := range fields { + if types[key].(string) == "string" { + skipped++ + continue + } + values = append(values, field.(string)) + total++ + } + result[qv.name] = values + } + + if skipped > 0 { + log.Printf("found %d fields; skipped %d non-numeric fields", total, skipped) + } else { + log.Printf("found %d fields", total) + } + return result, nil +} + +func (c *Client) getSeries() ([]*Series, error) { + com := "show series" + if c.filterSeries != "" { + com = fmt.Sprintf("%s %s", com, c.filterSeries) + } + q := influx.Query{ + Command: com, + Database: c.database, + RetentionPolicy: c.retention, + Chunked: true, + ChunkSize: c.chunkSize, + } + + log.Printf("fetching series: %s", stringify(q)) + cr, err := c.QueryAsChunk(q) + if err != nil { + return nil, fmt.Errorf("error while executing query %q: %s", q.Command, err) + } + + const key = "key" + var result []*Series + for { + resp, err := cr.NextResponse() + if err != nil { + if err == io.EOF { + break + } + return nil, err + } + if resp.Error() != nil { + return nil, fmt.Errorf("response error for query %q: %s", q.Command, resp.Error()) + } + qValues, err := parseResult(resp.Results[0]) + if err != nil { + return nil, err + } + for _, qv := range qValues { + for _, v := range qv.values[key] { + s := &Series{} + if err := s.unmarshal(v.(string)); err != nil { + return nil, err + } + result = append(result, s) + } + } + } + log.Printf("found %d series", len(result)) + return result, nil +} + +func (c *Client) do(q influx.Query) ([]queryValues, error) { + res, err := c.Query(q) + if err != nil { + return nil, fmt.Errorf("query %q err: %s", q.Command, err) + } + if len(res.Results) < 1 { + return nil, fmt.Errorf("exploration query %q returned 0 results", q.Command) + } + return parseResult(res.Results[0]) +} diff --git a/app/vmctl/influx/influx_test.go b/app/vmctl/influx/influx_test.go new file mode 100644 index 000000000..c92b0f5e0 --- /dev/null +++ b/app/vmctl/influx/influx_test.go @@ -0,0 +1,127 @@ +package influx + +import "testing" + +func TestFetchQuery(t *testing.T) { + testCases := []struct { + s Series + timeFilter string + expected string + }{ + { + s: Series{ + Measurement: "cpu", + Field: "value", + LabelPairs: []LabelPair{ + { + Name: "foo", + Value: "bar", + }, + }, + }, + expected: `select "value" from "cpu" where "foo"='bar'`, + }, + { + s: Series{ + Measurement: "cpu", + Field: "value", + LabelPairs: []LabelPair{ + { + Name: "foo", + Value: "bar", + }, + { + Name: "baz", + Value: "qux", + }, + }, + }, + expected: 
`select "value" from "cpu" where "foo"='bar' and "baz"='qux'`, + }, + { + s: Series{ + Measurement: "cpu", + Field: "value", + LabelPairs: []LabelPair{ + { + Name: "foo", + Value: "b'ar", + }, + }, + }, + timeFilter: "time >= now()", + expected: `select "value" from "cpu" where "foo"='b\'ar' and time >= now()`, + }, + { + s: Series{ + Measurement: "cpu", + Field: "value", + LabelPairs: []LabelPair{ + { + Name: "name", + Value: `dev-mapper-centos\x2dswap.swap`, + }, + { + Name: "state", + Value: "dev-mapp'er-c'en'tos", + }, + }, + }, + timeFilter: "time >= now()", + expected: `select "value" from "cpu" where "name"='dev-mapper-centos\\x2dswap.swap' and "state"='dev-mapp\'er-c\'en\'tos' and time >= now()`, + }, + { + s: Series{ + Measurement: "cpu", + Field: "value", + }, + timeFilter: "time >= now()", + expected: `select "value" from "cpu" where time >= now()`, + }, + { + s: Series{ + Measurement: "cpu", + Field: "value", + }, + expected: `select "value" from "cpu"`, + }, + } + + for _, tc := range testCases { + query := tc.s.fetchQuery(tc.timeFilter) + if query != tc.expected { + t.Fatalf("got: \n%s;\nexpected: \n%s", query, tc.expected) + } + } +} + +func TestTimeFilter(t *testing.T) { + testCases := []struct { + start string + end string + expected string + }{ + { + start: "2020-01-01T20:07:00Z", + end: "2020-01-01T21:07:00Z", + expected: "time >= '2020-01-01T20:07:00Z' and time <= '2020-01-01T21:07:00Z'", + }, + { + expected: "", + }, + { + start: "2020-01-01T20:07:00Z", + expected: "time >= '2020-01-01T20:07:00Z'", + }, + { + end: "2020-01-01T21:07:00Z", + expected: "time <= '2020-01-01T21:07:00Z'", + }, + } + for _, tc := range testCases { + f := timeFilter(tc.start, tc.end) + if f != tc.expected { + t.Fatalf("got: \n%q;\nexpected: \n%q", f, tc.expected) + } + } +} diff --git a/app/vmctl/influx/parser.go b/app/vmctl/influx/parser.go new file mode 100644 index 000000000..e8b425f42 --- /dev/null +++ b/app/vmctl/influx/parser.go @@ -0,0 +1,191 @@ +package influx + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + influx "github.com/influxdata/influxdb/client/v2" +) + +type queryValues struct { + name string + values map[string][]interface{} +} + +func parseResult(r influx.Result) ([]queryValues, error) { + if len(r.Err) > 0 { + return nil, fmt.Errorf("result error: %s", r.Err) + } + qValues := make([]queryValues, len(r.Series)) + for i, row := range r.Series { + values := make(map[string][]interface{}, len(row.Values)) + for _, value := range row.Values { + for idx, v := range value { + key := row.Columns[idx] + values[key] = append(values[key], v) + } + } + qValues[i] = queryValues{ + name: row.Name, + values: values, + } + } + return qValues, nil +} + +func toFloat64(v interface{}) (float64, error) { + switch i := v.(type) { + case json.Number: + return i.Float64() + case float64: + return i, nil + case float32: + return float64(i), nil + case int64: + return float64(i), nil + case int32: + return float64(i), nil + case int: + return float64(i), nil + case uint64: + return float64(i), nil + case uint32: + return float64(i), nil + case uint: + return float64(i), nil + case string: + return strconv.ParseFloat(i, 64) + default: + return 0, fmt.Errorf("unexpected value type %v", i) + } +} + +func parseDate(dateStr string) (int64, error) { + startTime, err := time.Parse(time.RFC3339, dateStr) + if err != nil { + return 0, fmt.Errorf("cannot parse %q: %s", dateStr, err) + } + return startTime.UnixNano() / 1e6, nil +} + +func stringify(q influx.Query) string { + return 
fmt.Sprintf("command: %q; database: %q; retention: %q", + q.Command, q.Database, q.RetentionPolicy) +} + +func (s *Series) unmarshal(v string) error { + noEscapeChars := strings.IndexByte(v, '\\') < 0 + n := nextUnescapedChar(v, ',', noEscapeChars) + if n < 0 { + s.Measurement = unescapeTagValue(v, noEscapeChars) + return nil + } + s.Measurement = unescapeTagValue(v[:n], noEscapeChars) + var err error + s.LabelPairs, err = unmarshalTags(v[n+1:], noEscapeChars) + if err != nil { + return fmt.Errorf("failed to unmarhsal tags: %s", err) + } + return nil +} + +func unmarshalTags(s string, noEscapeChars bool) ([]LabelPair, error) { + var result []LabelPair + for { + lp := LabelPair{} + n := nextUnescapedChar(s, ',', noEscapeChars) + if n < 0 { + if err := lp.unmarshal(s, noEscapeChars); err != nil { + return nil, err + } + if len(lp.Name) == 0 || len(lp.Value) == 0 { + return nil, nil + } + result = append(result, lp) + return result, nil + } + if err := lp.unmarshal(s[:n], noEscapeChars); err != nil { + return nil, err + } + s = s[n+1:] + if len(lp.Name) == 0 || len(lp.Value) == 0 { + continue + } + result = append(result, lp) + } +} + +func (lp *LabelPair) unmarshal(s string, noEscapeChars bool) error { + n := nextUnescapedChar(s, '=', noEscapeChars) + if n < 0 { + return fmt.Errorf("missing tag value for %q", s) + } + lp.Name = unescapeTagValue(s[:n], noEscapeChars) + lp.Value = unescapeTagValue(s[n+1:], noEscapeChars) + return nil +} + +func unescapeTagValue(s string, noEscapeChars bool) string { + if noEscapeChars { + // Fast path - no escape chars. + return s + } + n := strings.IndexByte(s, '\\') + if n < 0 { + return s + } + + // Slow path. Remove escape chars. + dst := make([]byte, 0, len(s)) + for { + dst = append(dst, s[:n]...) + s = s[n+1:] + if len(s) == 0 { + return string(append(dst, '\\')) + } + ch := s[0] + if ch != ' ' && ch != ',' && ch != '=' && ch != '\\' { + dst = append(dst, '\\') + } + dst = append(dst, ch) + s = s[1:] + n = strings.IndexByte(s, '\\') + if n < 0 { + return string(append(dst, s...)) + } + } +} + +func nextUnescapedChar(s string, ch byte, noEscapeChars bool) int { + if noEscapeChars { + // Fast path: just search for ch in s, since s has no escape chars. 
+ return strings.IndexByte(s, ch) + } + + sOrig := s +again: + n := strings.IndexByte(s, ch) + if n < 0 { + return -1 + } + if n == 0 { + return len(sOrig) - len(s) + n + } + if s[n-1] != '\\' { + return len(sOrig) - len(s) + n + } + nOrig := n + slashes := 0 + for n > 0 && s[n-1] == '\\' { + slashes++ + n-- + } + if slashes&1 == 0 { + return len(sOrig) - len(s) + nOrig + } + s = s[nOrig+1:] + goto again +} diff --git a/app/vmctl/influx/parser_test.go b/app/vmctl/influx/parser_test.go new file mode 100644 index 000000000..15ce9adaa --- /dev/null +++ b/app/vmctl/influx/parser_test.go @@ -0,0 +1,60 @@ +package influx + +import ( + "reflect" + "testing" +) + +func TestSeries_Unmarshal(t *testing.T) { + tag := func(name, value string) LabelPair { + return LabelPair{ + Name: name, + Value: value, + } + } + series := func(measurement string, lp ...LabelPair) Series { + return Series{ + Measurement: measurement, + LabelPairs: lp, + } + } + testCases := []struct { + got string + want Series + }{ + { + got: "cpu", + want: series("cpu"), + }, + { + got: "cpu,host=localhost", + want: series("cpu", tag("host", "localhost")), + }, + { + got: "cpu,host=localhost,instance=instance", + want: series("cpu", tag("host", "localhost"), tag("instance", "instance")), + }, + { + got: `fo\,bar\=baz,x\=\b=\\a\,\=\q\ `, + want: series("fo,bar=baz", tag(`x=\b`, `\a,=\q `)), + }, + { + got: "cpu,host=192.168.0.1,instance=fe80::fdc8:5e36:c2c6:baac%utun1", + want: series("cpu", tag("host", "192.168.0.1"), tag("instance", "fe80::fdc8:5e36:c2c6:baac%utun1")), + }, + { + got: `cpu,db=db1,host=localhost,server=host\=localhost\ user\=user\ `, + want: series("cpu", tag("db", "db1"), + tag("host", "localhost"), tag("server", "host=localhost user=user ")), + }, + } + for _, tc := range testCases { + s := Series{} + if err := s.unmarshal(tc.got); err != nil { + t.Fatalf("%q: unmarshal err: %s", tc.got, err) + } + if !reflect.DeepEqual(s, tc.want) { + t.Fatalf("%q: expected\n%#v\nto be equal\n%#v", tc.got, s, tc.want) + } + } +} diff --git a/app/vmctl/main.go b/app/vmctl/main.go new file mode 100644 index 000000000..d11aae8cc --- /dev/null +++ b/app/vmctl/main.go @@ -0,0 +1,158 @@ +package main + +import ( + "fmt" + "log" + "os" + "os/signal" + "strings" + "syscall" + "time" + + "github.com/urfave/cli/v2" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo" +) + +func main() { + start := time.Now() + app := &cli.App{ + Name: "vmctl", + Usage: "Victoria metrics command-line tool", + Version: buildinfo.Version, + Commands: []*cli.Command{ + { + Name: "influx", + Usage: "Migrate timeseries from InfluxDB", + Flags: mergeFlags(globalFlags, influxFlags, vmFlags), + Action: func(c *cli.Context) error { + fmt.Println("InfluxDB import mode") + + iCfg := influx.Config{ + Addr: c.String(influxAddr), + Username: c.String(influxUser), + Password: c.String(influxPassword), + Database: c.String(influxDB), + Retention: c.String(influxRetention), + Filter: influx.Filter{ + Series: c.String(influxFilterSeries), + TimeStart: c.String(influxFilterTimeStart), + TimeEnd: c.String(influxFilterTimeEnd), + }, + ChunkSize: c.Int(influxChunkSize), + } + influxClient, err := influx.NewClient(iCfg) + if err != nil { + return fmt.Errorf("failed to create influx client: %s", err) + } + + vmCfg := initConfigVM(c) + importer, err := vm.NewImporter(vmCfg) + if err != 
nil { + return fmt.Errorf("failed to create VM importer: %s", err) + } + + processor := newInfluxProcessor(influxClient, importer, + c.Int(influxConcurrency), c.String(influxMeasurementFieldSeparator)) + return processor.run(c.Bool(globalSilent)) + }, + }, + { + Name: "prometheus", + Usage: "Migrate timeseries from Prometheus", + Flags: mergeFlags(globalFlags, promFlags, vmFlags), + Action: func(c *cli.Context) error { + fmt.Println("Prometheus import mode") + + vmCfg := initConfigVM(c) + importer, err := vm.NewImporter(vmCfg) + if err != nil { + return fmt.Errorf("failed to create VM importer: %s", err) + } + + promCfg := prometheus.Config{ + Snapshot: c.String(promSnapshot), + Filter: prometheus.Filter{ + TimeMin: c.String(promFilterTimeStart), + TimeMax: c.String(promFilterTimeEnd), + Label: c.String(promFilterLabel), + LabelValue: c.String(promFilterLabelValue), + }, + } + cl, err := prometheus.NewClient(promCfg) + if err != nil { + return fmt.Errorf("failed to create prometheus client: %s", err) + } + pp := prometheusProcessor{ + cl: cl, + im: importer, + cc: c.Int(promConcurrency), + } + return pp.run(c.Bool(globalSilent)) + }, + }, + { + Name: "vm-native", + Usage: "Migrate time series between VictoriaMetrics installations via native binary format", + Flags: vmNativeFlags, + Action: func(c *cli.Context) error { + fmt.Println("VictoriaMetrics Native import mode") + + if c.String(vmNativeFilterMatch) == "" { + return fmt.Errorf("flag %q can't be empty", vmNativeFilterMatch) + } + + p := vmNativeProcessor{ + filter: filter{ + match: c.String(vmNativeFilterMatch), + timeStart: c.String(vmNativeFilterTimeStart), + timeEnd: c.String(vmNativeFilterTimeEnd), + }, + src: &vmNativeClient{ + addr: strings.Trim(c.String(vmNativeSrcAddr), "/"), + user: c.String(vmNativeSrcUser), + password: c.String(vmNativeSrcPassword), + }, + dst: &vmNativeClient{ + addr: strings.Trim(c.String(vmNativeDstAddr), "/"), + user: c.String(vmNativeDstUser), + password: c.String(vmNativeDstPassword), + extraLabels: c.StringSlice(vmExtraLabel), + }, + } + return p.run() + }, + }, + }, + } + + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-c + fmt.Println("\r- Execution cancelled") + os.Exit(0) + }() + + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } + log.Printf("Total time: %v", time.Since(start)) +} + +func initConfigVM(c *cli.Context) vm.Config { + return vm.Config{ + Addr: c.String(vmAddr), + User: c.String(vmUser), + Password: c.String(vmPassword), + Concurrency: uint8(c.Int(vmConcurrency)), + Compress: c.Bool(vmCompress), + AccountID: c.String(vmAccountID), + BatchSize: c.Int(vmBatchSize), + SignificantFigures: c.Int(vmSignificantFigures), + ExtraLabels: c.StringSlice(vmExtraLabel), + } +} diff --git a/app/vmctl/prometheus.go b/app/vmctl/prometheus.go new file mode 100644 index 000000000..a6c4a11ae --- /dev/null +++ b/app/vmctl/prometheus.go @@ -0,0 +1,131 @@ +package main + +import ( + "fmt" + "log" + "sync" + + "github.com/cheggaaa/pb/v3" + "github.com/prometheus/prometheus/tsdb" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm" +) + +type prometheusProcessor struct { + // prometheus client fetches and reads + // snapshot blocks + cl *prometheus.Client + // importer performs import requests + // for timeseries data returned from + // snapshot blocks + im *vm.Importer + // cc stands for concurrency + // and defines number of concurrently + // running snapshot block 
readers + cc int +} + +func (pp *prometheusProcessor) run(silent bool) error { + blocks, err := pp.cl.Explore() + if err != nil { + return fmt.Errorf("explore failed: %s", err) + } + if len(blocks) < 1 { + return fmt.Errorf("found no blocks to import") + } + question := fmt.Sprintf("Found %d blocks to import. Continue?", len(blocks)) + if !silent && !prompt(question) { + return nil + } + + bar := pb.StartNew(len(blocks)) + blockReadersCh := make(chan tsdb.BlockReader) + errCh := make(chan error, pp.cc) + pp.im.ResetStats() + + var wg sync.WaitGroup + wg.Add(pp.cc) + for i := 0; i < pp.cc; i++ { + go func() { + defer wg.Done() + for br := range blockReadersCh { + if err := pp.do(br); err != nil { + errCh <- fmt.Errorf("read failed for block %q: %s", br.Meta().ULID, err) + return + } + bar.Increment() + } + }() + } + + // any error breaks the import + for _, br := range blocks { + select { + case promErr := <-errCh: + close(blockReadersCh) + return fmt.Errorf("prometheus error: %s", promErr) + case vmErr := <-pp.im.Errors(): + close(blockReadersCh) + return fmt.Errorf("Import process failed: \n%s", wrapErr(vmErr)) + case blockReadersCh <- br: + } + } + + close(blockReadersCh) + wg.Wait() + // wait for all buffers to flush + pp.im.Close() + // drain import errors channel + for vmErr := range pp.im.Errors() { + return fmt.Errorf("Import process failed: \n%s", wrapErr(vmErr)) + } + bar.Finish() + log.Println("Import finished!") + log.Print(pp.im.Stats()) + return nil +} + +func (pp *prometheusProcessor) do(b tsdb.BlockReader) error { + ss, err := pp.cl.Read(b) + if err != nil { + return fmt.Errorf("failed to read block: %s", err) + } + for ss.Next() { + var name string + var labels []vm.LabelPair + series := ss.At() + + for _, label := range series.Labels() { + if label.Name == "__name__" { + name = label.Value + continue + } + labels = append(labels, vm.LabelPair{ + Name: label.Name, + Value: label.Value, + }) + } + if name == "" { + return fmt.Errorf("failed to find `__name__` label in labelset for block %v", b.Meta().ULID) + } + + var timestamps []int64 + var values []float64 + it := series.Iterator() + for it.Next() { + t, v := it.At() + timestamps = append(timestamps, t) + values = append(values, v) + } + if err := it.Err(); err != nil { + return err + } + pp.im.Input() <- &vm.TimeSeries{ + Name: name, + LabelPairs: labels, + Timestamps: timestamps, + Values: values, + } + } + return ss.Err() +} diff --git a/app/vmctl/prometheus/prometheus.go b/app/vmctl/prometheus/prometheus.go new file mode 100644 index 000000000..fad37e4c6 --- /dev/null +++ b/app/vmctl/prometheus/prometheus.go @@ -0,0 +1,147 @@ +package prometheus + +import ( + "fmt" + "time" + + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb" +) + +// Config contains a list of params needed +// for reading Prometheus snapshots +type Config struct { + // Path to snapshot directory + Snapshot string + + Filter Filter +} + +// Filter contains configuration for filtering +// the timeseries +type Filter struct { + TimeMin string + TimeMax string + Label string + LabelValue string +} + +// Clinet is a wrapper over Prometheus tsdb.DBReader +type Client struct { + *tsdb.DBReadOnly + filter filter +} + +type filter struct { + min, max int64 + label string + labelValue string +} + +func (f filter) inRange(min, max int64) bool { + fmin, fmax := f.min, f.max + if min == 0 { + fmin = min + } + if fmax == 0 { + fmax = max + } + return min <= fmax && fmin <= max +} + 
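+// Note: a zero filter bound in inRange above leaves that side of the time
+// range open. For example, a filter of [3, 10] matches a block covering
+// [9, 12] since the ranges overlap, but not a block covering [12, 15].
+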
+// NewClient creates and validates new Client +// with given Config +func NewClient(cfg Config) (*Client, error) { + db, err := tsdb.OpenDBReadOnly(cfg.Snapshot, nil) + if err != nil { + return nil, fmt.Errorf("failed to open snapshot %q: %s", cfg.Snapshot, err) + } + c := &Client{DBReadOnly: db} + min, max, err := parseTime(cfg.Filter.TimeMin, cfg.Filter.TimeMax) + if err != nil { + return nil, fmt.Errorf("failed to parse time in filter: %s", err) + } + c.filter = filter{ + min: min, + max: max, + label: cfg.Filter.Label, + labelValue: cfg.Filter.LabelValue, + } + return c, nil +} + +// Explore fetches all available blocks from a snapshot +// and collects the Meta() data from each block. +// Explore does initial filtering by time-range +// for snapshot blocks but does not take into account +// label filters. +func (c *Client) Explore() ([]tsdb.BlockReader, error) { + blocks, err := c.Blocks() + if err != nil { + return nil, fmt.Errorf("failed to fetch blocks: %s", err) + } + s := &Stats{ + Filtered: c.filter.min != 0 || c.filter.max != 0 || c.filter.label != "", + Blocks: len(blocks), + } + var blocksToImport []tsdb.BlockReader + for _, block := range blocks { + meta := block.Meta() + if !c.filter.inRange(meta.MinTime, meta.MaxTime) { + s.SkippedBlocks++ + continue + } + if s.MinTime == 0 || meta.MinTime < s.MinTime { + s.MinTime = meta.MinTime + } + if s.MaxTime == 0 || meta.MaxTime > s.MaxTime { + s.MaxTime = meta.MaxTime + } + s.Samples += meta.Stats.NumSamples + s.Series += meta.Stats.NumSeries + blocksToImport = append(blocksToImport, block) + } + fmt.Println(s) + return blocksToImport, nil +} + +// Read reads the given BlockReader according to configured +// time and label filters. +func (c *Client) Read(block tsdb.BlockReader) (storage.SeriesSet, error) { + minTime, maxTime := block.Meta().MinTime, block.Meta().MaxTime + if c.filter.min != 0 { + minTime = c.filter.min + } + if c.filter.max != 0 { + maxTime = c.filter.max + } + q, err := tsdb.NewBlockQuerier(block, minTime, maxTime) + if err != nil { + return nil, err + } + ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, c.filter.label, c.filter.labelValue)) + return ss, nil +} + +func parseTime(start, end string) (int64, int64, error) { + var s, e int64 + if start == "" && end == "" { + return 0, 0, nil + } + if start != "" { + v, err := time.Parse(time.RFC3339, start) + if err != nil { + return 0, 0, fmt.Errorf("failed to parse %q: %s", start, err) + } + s = v.UnixNano() / int64(time.Millisecond) + } + if end != "" { + v, err := time.Parse(time.RFC3339, end) + if err != nil { + return 0, 0, fmt.Errorf("failed to parse %q: %s", end, err) + } + e = v.UnixNano() / int64(time.Millisecond) + } + return s, e, nil +} diff --git a/app/vmctl/prometheus/prometheus_test.go b/app/vmctl/prometheus/prometheus_test.go new file mode 100644 index 000000000..6fd738f4e --- /dev/null +++ b/app/vmctl/prometheus/prometheus_test.go @@ -0,0 +1,34 @@ +package prometheus + +import ( + "testing" +) + +func TestInRange(t *testing.T) { + testCases := []struct { + filterMin, filterMax int64 + blockMin, blockMax int64 + expected bool + }{ + {0, 0, 1, 2, true}, + {0, 3, 1, 2, true}, + {0, 3, 4, 5, false}, + {3, 0, 1, 2, false}, + {3, 0, 2, 4, true}, + {3, 10, 1, 2, false}, + {3, 10, 1, 4, true}, + {3, 10, 5, 9, true}, + {3, 10, 9, 12, true}, + {3, 10, 12, 15, false}, + } + for _, tc := range testCases { + f := filter{ + min: tc.filterMin, + max: tc.filterMax, + } + got := f.inRange(tc.blockMin, tc.blockMax) + if got != tc.expected { + 
t.Fatalf("got %v; expected %v: %v", got, tc.expected, tc) + } + } +} diff --git a/app/vmctl/prometheus/stats.go b/app/vmctl/prometheus/stats.go new file mode 100644 index 000000000..a5d778deb --- /dev/null +++ b/app/vmctl/prometheus/stats.go @@ -0,0 +1,36 @@ +package prometheus + +import ( + "fmt" + "time" +) + +type Stats struct { + Filtered bool + MinTime int64 + MaxTime int64 + Samples uint64 + Series uint64 + Blocks int + SkippedBlocks int +} + +func (s Stats) String() string { + str := fmt.Sprintf("Prometheus snapshot stats:\n"+ + " blocks found: %d;\n"+ + " blocks skipped by time filter: %d;\n"+ + " min time: %d (%v);\n"+ + " max time: %d (%v);\n"+ + " samples: %d;\n"+ + " series: %d.", + s.Blocks, s.SkippedBlocks, + s.MinTime, time.Unix(s.MinTime/1e3, 0).Format(time.RFC3339), + s.MaxTime, time.Unix(s.MaxTime/1e3, 0).Format(time.RFC3339), + s.Samples, s.Series) + + if s.Filtered { + str += "\n* Stats numbers are based on blocks meta info and don't account for applied filters." + } + + return str +} diff --git a/app/vmctl/utils.go b/app/vmctl/utils.go new file mode 100644 index 000000000..678e70635 --- /dev/null +++ b/app/vmctl/utils.go @@ -0,0 +1,33 @@ +package main + +import ( + "bufio" + "fmt" + "os" + "strings" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm" +) + +func prompt(question string) bool { + reader := bufio.NewReader(os.Stdin) + fmt.Print(question, " [Y/n] ") + answer, err := reader.ReadString('\n') + if err != nil { + panic(err) + } + answer = strings.TrimSpace(strings.ToLower(answer)) + if answer == "" || answer == "yes" || answer == "y" { + return true + } + return false +} + +func wrapErr(vmErr *vm.ImportError) error { + var errTS string + for _, ts := range vmErr.Batch { + errTS += fmt.Sprintf("%s for timestamps range %d - %d\n", + ts.String(), ts.Timestamps[0], ts.Timestamps[len(ts.Timestamps)-1]) + } + return fmt.Errorf("%s with error: %s", errTS, vmErr.Err) +} diff --git a/app/vmctl/vm/stats.go b/app/vmctl/vm/stats.go new file mode 100644 index 000000000..dc76905e4 --- /dev/null +++ b/app/vmctl/vm/stats.go @@ -0,0 +1,47 @@ +package vm + +import ( + "fmt" + "sync" + "time" +) + +type stats struct { + sync.Mutex + samples uint64 + bytes uint64 + requests uint64 + retries uint64 + startTime time.Time + idleDuration time.Duration +} + +func (s *stats) String() string { + s.Lock() + defer s.Unlock() + + totalImportDuration := time.Since(s.startTime) + totalImportDurationS := totalImportDuration.Seconds() + var samplesPerS float64 + if s.samples > 0 && totalImportDurationS > 0 { + samplesPerS = float64(s.samples) / totalImportDurationS + } + bytesPerS := byteCountSI(0) + if s.bytes > 0 && totalImportDurationS > 0 { + bytesPerS = byteCountSI(int64(float64(s.bytes) / totalImportDurationS)) + } + + return fmt.Sprintf("VictoriaMetrics importer stats:\n"+ + " idle duration: %v;\n"+ + " time spent while importing: %v;\n"+ + " total samples: %d;\n"+ + " samples/s: %.2f;\n"+ + " total bytes: %s;\n"+ + " bytes/s: %s;\n"+ + " import requests: %d;\n"+ + " import requests retries: %d;", + s.idleDuration, totalImportDuration, + s.samples, samplesPerS, + byteCountSI(int64(s.bytes)), bytesPerS, + s.requests, s.retries) +} diff --git a/app/vmctl/vm/timeseries.go b/app/vmctl/vm/timeseries.go new file mode 100644 index 000000000..7941268ba --- /dev/null +++ b/app/vmctl/vm/timeseries.go @@ -0,0 +1,79 @@ +package vm + +import ( + "fmt" + "io" +) + +type TimeSeries struct { + Name string + LabelPairs []LabelPair + Timestamps []int64 + Values []float64 +} + +type LabelPair 
struct { + Name string + Value string +} + +func (ts TimeSeries) String() string { + s := ts.Name + if len(ts.LabelPairs) < 1 { + return s + } + var labels string + for i, lp := range ts.LabelPairs { + labels += fmt.Sprintf("%s=%q", lp.Name, lp.Value) + if i < len(ts.LabelPairs)-1 { + labels += "," + } + } + return fmt.Sprintf("%s{%s}", s, labels) +} + +// cWriter used to avoid error checking +// while doing Write calls. +// cWriter caches the first error if any +// and discards all sequential write calls +type cWriter struct { + w io.Writer + n int + err error +} + +func (cw *cWriter) printf(format string, args ...interface{}) { + if cw.err != nil { + return + } + n, err := fmt.Fprintf(cw.w, format, args...) + cw.n += n + cw.err = err +} + +//"{"metric":{"__name__":"cpu_usage_guest","arch":"x64","hostname":"host_19",},"timestamps":[1567296000000,1567296010000],"values":[1567296000000,66]} +func (ts *TimeSeries) write(w io.Writer) (int, error) { + pointsCount := len(ts.Timestamps) + if pointsCount == 0 { + return 0, nil + } + + cw := &cWriter{w: w} + cw.printf(`{"metric":{"__name__":%q`, ts.Name) + if len(ts.LabelPairs) > 0 { + for _, lp := range ts.LabelPairs { + cw.printf(",%q:%q", lp.Name, lp.Value) + } + } + + cw.printf(`},"timestamps":[`) + for i := 0; i < pointsCount-1; i++ { + cw.printf(`%d,`, ts.Timestamps[i]) + } + cw.printf(`%d],"values":[`, ts.Timestamps[pointsCount-1]) + for i := 0; i < pointsCount-1; i++ { + cw.printf(`%v,`, ts.Values[i]) + } + cw.printf("%v]}\n", ts.Values[pointsCount-1]) + return cw.n, cw.err +} diff --git a/app/vmctl/vm/timeseries_test.go b/app/vmctl/vm/timeseries_test.go new file mode 100644 index 000000000..f020df96f --- /dev/null +++ b/app/vmctl/vm/timeseries_test.go @@ -0,0 +1,89 @@ +package vm + +import ( + "bytes" + "math" + "strings" + "testing" +) + +func TestTimeSeries_Write(t *testing.T) { + var testCases = []struct { + name string + ts *TimeSeries + exp string + }{ + { + name: "one datapoint", + ts: &TimeSeries{ + Name: "foo", + LabelPairs: []LabelPair{ + { + Name: "key", + Value: "val", + }, + }, + Timestamps: []int64{1577877162200}, + Values: []float64{1}, + }, + exp: `{"metric":{"__name__":"foo","key":"val"},"timestamps":[1577877162200],"values":[1]}`, + }, + { + name: "multiple samples", + ts: &TimeSeries{ + Name: "foo", + LabelPairs: []LabelPair{ + { + Name: "key", + Value: "val", + }, + }, + Timestamps: []int64{1577877162200, 15778771622400, 15778771622600}, + Values: []float64{1, 1.6263, 32.123}, + }, + exp: `{"metric":{"__name__":"foo","key":"val"},"timestamps":[1577877162200,15778771622400,15778771622600],"values":[1,1.6263,32.123]}`, + }, + { + name: "no samples", + ts: &TimeSeries{ + Name: "foo", + LabelPairs: []LabelPair{ + { + Name: "key", + Value: "val", + }, + }, + }, + exp: ``, + }, + { + name: "inf values", + ts: &TimeSeries{ + Name: "foo", + LabelPairs: []LabelPair{ + { + Name: "key", + Value: "val", + }, + }, + Timestamps: []int64{1577877162200, 1577877162200, 1577877162200}, + Values: []float64{0, math.Inf(-1), math.Inf(1)}, + }, + exp: `{"metric":{"__name__":"foo","key":"val"},"timestamps":[1577877162200,1577877162200,1577877162200],"values":[0,-Inf,+Inf]}`, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + b := &bytes.Buffer{} + _, err := tc.ts.write(b) + if err != nil { + t.Error(err) + } + got := strings.TrimSpace(b.String()) + if got != tc.exp { + t.Fatalf("\ngot: %q\nwant: %q", got, tc.exp) + } + }) + } +} diff --git a/app/vmctl/vm/vm.go b/app/vmctl/vm/vm.go new file mode 100644 index 
000000000..aac65f943 --- /dev/null +++ b/app/vmctl/vm/vm.go @@ -0,0 +1,369 @@ +package vm + +import ( + "bufio" + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "net/http" + "strings" + "sync" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal" +) + +// Config contains list of params to configure +// the Importer +type Config struct { + // VictoriaMetrics address to perform import requests + // --httpListenAddr value for single node version + // --httpListenAddr value of VMSelect component for cluster version + Addr string + // Concurrency defines number of worker + // performing the import requests concurrently + Concurrency uint8 + // Whether to apply gzip compression + Compress bool + // AccountID for cluster version. + // Empty value assumes it is a single node version + AccountID string + // BatchSize defines how many samples + // importer collects before sending the import request + BatchSize int + // User name for basic auth + User string + // Password for basic auth + Password string + // SignificantFigures defines the number of significant figures to leave + // in metric values before importing. + // Zero value saves all the significant decimal places + SignificantFigures int + // ExtraLabels that will be added to all imported series. Must be in label=value format. + ExtraLabels []string +} + +// Importer performs insertion of timeseries +// via VictoriaMetrics import protocol +// see https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master#how-to-import-time-series-data +type Importer struct { + addr string + importPath string + compress bool + user string + password string + + close chan struct{} + input chan *TimeSeries + errors chan *ImportError + + wg sync.WaitGroup + once sync.Once + + s *stats +} + +func (im *Importer) ResetStats() { + im.s = &stats{ + startTime: time.Now(), + } +} + +func (im *Importer) Stats() string { + return im.s.String() +} + +// AddExtraLabelsToImportPath - adds extra labels query params to given url path. +func AddExtraLabelsToImportPath(path string, extraLabels []string) (string, error) { + dst := path + separator := "?" 
+ for _, extraLabel := range extraLabels { + if !strings.Contains(extraLabel, "=") { + return path, fmt.Errorf("bad format for extra_label flag, it must be `key=value`, got: %q", extraLabel) + } + if strings.Contains(dst, "?") { + separator = "&" + } + dst += fmt.Sprintf("%sextra_label=%s", separator, extraLabel) + } + return dst, nil +} + +func NewImporter(cfg Config) (*Importer, error) { + if cfg.Concurrency < 1 { + return nil, fmt.Errorf("concurrency can't be lower than 1") + } + + addr := strings.TrimRight(cfg.Addr, "/") + // if single version + // see https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master#how-to-import-time-series-data + importPath := addr + "/api/v1/import" + if cfg.AccountID != "" { + // if cluster version + // see https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster#url-format + importPath = fmt.Sprintf("%s/insert/%s/prometheus/api/v1/import", addr, cfg.AccountID) + } + importPath, err := AddExtraLabelsToImportPath(importPath, cfg.ExtraLabels) + if err != nil { + return nil, err + } + + im := &Importer{ + addr: addr, + importPath: importPath, + compress: cfg.Compress, + user: cfg.User, + password: cfg.Password, + close: make(chan struct{}), + input: make(chan *TimeSeries, cfg.Concurrency*4), + errors: make(chan *ImportError, cfg.Concurrency), + } + if err := im.Ping(); err != nil { + return nil, fmt.Errorf("ping to %q failed: %s", addr, err) + } + + if cfg.BatchSize < 1 { + cfg.BatchSize = 1e5 + } + + im.wg.Add(int(cfg.Concurrency)) + for i := 0; i < int(cfg.Concurrency); i++ { + go func() { + defer im.wg.Done() + im.startWorker(cfg.BatchSize, cfg.SignificantFigures) + }() + } + im.ResetStats() + return im, nil +} + +// ImportError is type of error generated +// in case of unsuccessful import request +type ImportError struct { + // The batch of timeseries that failed + Batch []*TimeSeries + // The error that appeared during insert + Err error +} + +// Errors returns a channel for receiving +// import errors if any +func (im *Importer) Errors() chan *ImportError { return im.errors } + +// Input returns a channel for sending timeseries +// that need to be imported +func (im *Importer) Input() chan<- *TimeSeries { return im.input } + +// Close sends signal to all goroutines to exit +// and waits until they are finished +func (im *Importer) Close() { + im.once.Do(func() { + close(im.close) + im.wg.Wait() + close(im.errors) + }) +} + +func (im *Importer) startWorker(batchSize, significantFigures int) { + var batch []*TimeSeries + var dataPoints int + var waitForBatch time.Time + for { + select { + case <-im.close: + if err := im.Import(batch); err != nil { + im.errors <- &ImportError{ + Batch: batch, + Err: err, + } + } + return + case ts := <-im.input: + // init waitForBatch when first + // value was received + if waitForBatch.IsZero() { + waitForBatch = time.Now() + } + + if significantFigures > 0 { + // Round values according to significantFigures + for i, v := range ts.Values { + ts.Values[i] = decimal.Round(v, significantFigures) + } + } + + batch = append(batch, ts) + dataPoints += len(ts.Values) + if dataPoints < batchSize { + continue + } + im.s.Lock() + im.s.idleDuration += time.Since(waitForBatch) + im.s.Unlock() + + if err := im.flush(batch); err != nil { + im.errors <- &ImportError{ + Batch: batch, + Err: err, + } + // make a new batch, since old one was referenced as err + batch = make([]*TimeSeries, len(batch)) + } + batch = batch[:0] + dataPoints = 0 + waitForBatch = time.Now() + } + } +} + +const ( + // TODO: make configurable + 
backoffRetries = 5 + backoffFactor = 1.7 + backoffMinDuration = time.Second +) + +func (im *Importer) flush(b []*TimeSeries) error { + var err error + for i := 0; i < backoffRetries; i++ { + err = im.Import(b) + if err == nil { + return nil + } + if errors.Is(err, ErrBadRequest) { + return err // fail fast if not recoverable + } + im.s.Lock() + im.s.retries++ + im.s.Unlock() + backoff := float64(backoffMinDuration) * math.Pow(backoffFactor, float64(i)) + time.Sleep(time.Duration(backoff)) + } + return fmt.Errorf("import failed with %d retries: %s", backoffRetries, err) +} + +func (im *Importer) Ping() error { + url := fmt.Sprintf("%s/health", im.addr) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return fmt.Errorf("cannot create request to %q: %s", im.addr, err) + } + if im.user != "" { + req.SetBasicAuth(im.user, im.password) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("bad status code: %d", resp.StatusCode) + } + return nil +} + +func (im *Importer) Import(tsBatch []*TimeSeries) error { + if len(tsBatch) < 1 { + return nil + } + + pr, pw := io.Pipe() + req, err := http.NewRequest("POST", im.importPath, pr) + if err != nil { + return fmt.Errorf("cannot create request to %q: %s", im.addr, err) + } + if im.user != "" { + req.SetBasicAuth(im.user, im.password) + } + if im.compress { + req.Header.Set("Content-Encoding", "gzip") + } + + errCh := make(chan error) + go func() { + errCh <- do(req) + close(errCh) + }() + + w := io.Writer(pw) + if im.compress { + zw, err := gzip.NewWriterLevel(pw, 1) + if err != nil { + return fmt.Errorf("unexpected error when creating gzip writer: %s", err) + } + w = zw + } + bw := bufio.NewWriterSize(w, 16*1024) + + var totalSamples, totalBytes int + for _, ts := range tsBatch { + n, err := ts.write(bw) + if err != nil { + return fmt.Errorf("write err: %w", err) + } + totalBytes += n + totalSamples += len(ts.Values) + } + if err := bw.Flush(); err != nil { + return err + } + if im.compress { + err := w.(*gzip.Writer).Close() + if err != nil { + return err + } + } + if err := pw.Close(); err != nil { + return err + } + + requestErr := <-errCh + if requestErr != nil { + return fmt.Errorf("import request error for %q: %w", im.addr, requestErr) + } + + im.s.Lock() + im.s.bytes += uint64(totalBytes) + im.s.samples += uint64(totalSamples) + im.s.requests++ + im.s.Unlock() + + return nil +} + +var ErrBadRequest = errors.New("bad request") + +func do(req *http.Request) error { + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("unexpected error when performing request: %s", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusNoContent { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("failed to read response body for status code %d: %s", resp.StatusCode, err) + } + if resp.StatusCode == http.StatusBadRequest { + return fmt.Errorf("%w: unexpected response code %d: %s", ErrBadRequest, resp.StatusCode, string(body)) + } + return fmt.Errorf("unexpected response code %d: %s", resp.StatusCode, string(body)) + } + return nil +} + +func byteCountSI(b int64) string { + const unit = 1000 + if b < unit { + return fmt.Sprintf("%d B", b) + } + div, exp := int64(unit), 0 + for n := b / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %cB", + float64(b)/float64(div), "kMGTPE"[exp]) +} diff --git a/app/vmctl/vm/vm_test.go b/app/vmctl/vm/vm_test.go new file mode 
100644 index 000000000..1d9d42523 --- /dev/null +++ b/app/vmctl/vm/vm_test.go @@ -0,0 +1,69 @@ +package vm + +import "testing" + +func TestAddExtraLabelsToImportPath(t *testing.T) { + type args struct { + path string + extraLabels []string + } + tests := []struct { + name string + args args + want string + wantErr bool + }{ + { + name: "ok w/o extra labels", + args: args{ + path: "/api/v1/import", + }, + want: "/api/v1/import", + }, + { + name: "ok one extra label", + args: args{ + path: "/api/v1/import", + extraLabels: []string{"instance=host-1"}, + }, + want: "/api/v1/import?extra_label=instance=host-1", + }, + { + name: "ok two extra labels", + args: args{ + path: "/api/v1/import", + extraLabels: []string{"instance=host-2", "job=vmagent"}, + }, + want: "/api/v1/import?extra_label=instance=host-2&extra_label=job=vmagent", + }, + { + name: "ok two extra with exist param", + args: args{ + path: "/api/v1/import?timeout=50", + extraLabels: []string{"instance=host-2", "job=vmagent"}, + }, + want: "/api/v1/import?timeout=50&extra_label=instance=host-2&extra_label=job=vmagent", + }, + { + name: "bad incorrect format for extra label", + args: args{ + path: "/api/v1/import", + extraLabels: []string{"label=value", "bad_label_wo_value"}, + }, + want: "/api/v1/import", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := AddExtraLabelsToImportPath(tt.args.path, tt.args.extraLabels) + if (err != nil) != tt.wantErr { + t.Errorf("AddExtraLabelsToImportPath() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("AddExtraLabelsToImportPath() got = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/app/vmctl/vm_native.go b/app/vmctl/vm_native.go new file mode 100644 index 000000000..a69174847 --- /dev/null +++ b/app/vmctl/vm_native.go @@ -0,0 +1,141 @@ +package main + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + + "github.com/cheggaaa/pb/v3" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm" +) + +type vmNativeProcessor struct { + filter filter + + dst *vmNativeClient + src *vmNativeClient +} + +type vmNativeClient struct { + addr string + user string + password string + extraLabels []string +} + +type filter struct { + match string + timeStart string + timeEnd string +} + +func (f filter) String() string { + s := fmt.Sprintf("\n\tfilter: match[]=%s", f.match) + if f.timeStart != "" { + s += fmt.Sprintf("\n\tstart: %s", f.timeStart) + } + if f.timeEnd != "" { + s += fmt.Sprintf("\n\tend: %s", f.timeEnd) + } + return s +} + +const ( + nativeExportAddr = "api/v1/export/native" + nativeImportAddr = "api/v1/import/native" + + barTpl = `Total: {{counters . }} {{ cycle . "↖" "↗" "↘" "↙" }} Speed: {{speed . }} {{string . 
"suffix"}}` +) + +func (p *vmNativeProcessor) run() error { + pr, pw := io.Pipe() + + fmt.Printf("Initing export pipe from %q with filters: %s\n", p.src.addr, p.filter) + exportReader, err := p.exportPipe() + if err != nil { + return fmt.Errorf("failed to init export pipe: %s", err) + } + + sync := make(chan struct{}) + nativeImportAddr, err := vm.AddExtraLabelsToImportPath(nativeImportAddr, p.dst.extraLabels) + if err != nil { + return err + } + + go func() { + defer func() { close(sync) }() + u := fmt.Sprintf("%s/%s", p.dst.addr, nativeImportAddr) + req, err := http.NewRequest("POST", u, pr) + if err != nil { + log.Fatalf("cannot create import request to %q: %s", p.dst.addr, err) + } + importResp, err := p.dst.do(req, http.StatusNoContent) + if err != nil { + log.Fatalf("import request failed: %s", err) + } + importResp.Body.Close() + }() + + fmt.Printf("Initing import process to %q:\n", p.dst.addr) + bar := pb.ProgressBarTemplate(barTpl).Start64(0) + barReader := bar.NewProxyReader(exportReader) + + _, err = io.Copy(pw, barReader) + if err != nil { + return fmt.Errorf("failed to write into %q: %s", p.dst.addr, err) + } + if err := pw.Close(); err != nil { + return err + } + <-sync + + bar.Finish() + return nil +} + +func (p *vmNativeProcessor) exportPipe() (io.ReadCloser, error) { + u := fmt.Sprintf("%s/%s", p.src.addr, nativeExportAddr) + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, fmt.Errorf("cannot create request to %q: %s", p.src.addr, err) + } + + params := req.URL.Query() + params.Set("match[]", p.filter.match) + if p.filter.timeStart != "" { + params.Set("start", p.filter.timeStart) + } + if p.filter.timeEnd != "" { + params.Set("end", p.filter.timeEnd) + } + req.URL.RawQuery = params.Encode() + + // disable compression since it is meaningless for native format + req.Header.Set("Accept-Encoding", "identity") + resp, err := p.src.do(req, http.StatusOK) + if err != nil { + return nil, fmt.Errorf("export request failed: %s", err) + } + return resp.Body, nil +} + +func (c *vmNativeClient) do(req *http.Request, expSC int) (*http.Response, error) { + if c.user != "" { + req.SetBasicAuth(c.user, c.password) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("unexpected error when performing request: %s", err) + } + + if resp.StatusCode != expSC { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read response body for status code %d: %s", resp.StatusCode, err) + } + return nil, fmt.Errorf("unexpected response code %d: %s", resp.StatusCode, string(body)) + } + return resp, err +} diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 6f991e3f2..887b1f4e3 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -2,6 +2,7 @@ # tip +* FEATURE: added [vmctl tool](https://victoriametrics.github.io/vmctl.html) to VictoriaMetrics release process. Now it is packaged in `vmutils-*.tar.gz` archive on [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases). Source code for `vmctl` tool has been moved from [github.com/VictoriaMetrics/vmctl](https://github.com/VictoriaMetrics/vmctl) to [github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmctl). * FEATURE: added `-loggerTimezone` command-line flag for adjusting time zone for timestamps in log messages. By default UTC is used. 
* FEATURE: added `-search.maxStepForPointsAdjustment` command-line flag, which can be used for disabling adjustment for points returned by `/api/v1/query_range` handler if such points have timestamps closer than `-search.latencyOffset` to the current time. Such points may contain incomplete data, so they are substituted by the previous values for `step` query args smaller than one minute by default. * FEATURE: vmalert: added `-datasource.queryStep` command-line flag for passing optional `step` query arg to `/api/v1/query` endpoint. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1025 diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md index e60d13534..3dc192b00 100644 --- a/docs/Single-server-VictoriaMetrics.md +++ b/docs/Single-server-VictoriaMetrics.md @@ -154,6 +154,7 @@ Alphabetically sorted links to case studies: * [Tuning](#tuning) * [Monitoring](#monitoring) * [Troubleshooting](#troubleshooting) +* [Data migration](#data-migration) * [Backfilling](#backfilling) * [Data updates](#data-updates) * [Replication](#replication) @@ -1353,6 +1354,17 @@ See the example of alerting rules for VM components [here](https://github.com/Vi * VictoriaMetrics ignores `NaN` values during data ingestion. +## Data migration + +Use [vmctl](https://victoriametrics.github.io/vmctl.html) for data migration. It supports the following data migration types: + +* From Prometheus to VictoriaMetrics +* From InfluxDB to VictoriaMetrics +* From VictoriaMetrics to VictoriaMetrics + +See [vmctl docs](https://victoriametrics.github.io/vmctl.html) for more details. + + ## Backfilling VictoriaMetrics accepts historical data in arbitrary order of time via [any supported ingestion method](#how-to-import-time-series-data). @@ -1420,7 +1432,6 @@ The collected profiles may be analyzed with [go tool pprof](https://github.com/g * [Helm charts for single-node and cluster versions of VictoriaMetrics](https://github.com/VictoriaMetrics/helm-charts). * [Kubernetes operator for VictoriaMetrics](https://github.com/VictoriaMetrics/operator). -* [vmctl tool for data migration to VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl). * [netdata](https://github.com/netdata/netdata) can push data into VictoriaMetrics via `Prometheus remote_write API`. See [these docs](https://github.com/netdata/netdata#integrations). * [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi) can use VictoriaMetrics as time series backend. 
diff --git a/docs/vmctl.md b/docs/vmctl.md new file mode 100644 index 000000000..f4f315c75 --- /dev/null +++ b/docs/vmctl.md @@ -0,0 +1,427 @@ +# vmctl - Victoria metrics command-line tool + +Features: +- [x] Prometheus: migrate data from Prometheus to VictoriaMetrics using snapshot API +- [x] Thanos: migrate data from Thanos to VictoriaMetrics +- [ ] ~~Prometheus: migrate data from Prometheus to VictoriaMetrics by query~~(discarded) +- [x] InfluxDB: migrate data from InfluxDB to VictoriaMetrics +- [ ] Storage Management: data re-balancing between nodes + +# Table of contents + +* [Articles](#articles) +* [How to build](#how-to-build) +* [Migrating data from InfluxDB 1.x](#migrating-data-from-influxdb-1x) + * [Data mapping](#data-mapping) + * [Configuration](#configuration) + * [Filtering](#filtering) +* [Migrating data from InfluxDB 2.x](#migrating-data-from-influxdb-2x) +* [Migrating data from Prometheus](#migrating-data-from-prometheus) + * [Data mapping](#data-mapping-1) + * [Configuration](#configuration-1) + * [Filtering](#filtering-1) +* [Migrating data from Thanos](#migrating-data-from-thanos) + * [Current data](#current-data) + * [Historical data](#historical-data) +* [Migrating data from VictoriaMetrics](#migrating-data-from-victoriametrics) + * [Native protocol](#native-protocol) +* [Tuning](#tuning) + * [Influx mode](#influx-mode) + * [Prometheus mode](#prometheus-mode) + * [VictoriaMetrics importer](#victoriametrics-importer) + * [Importer stats](#importer-stats) +* [Significant figures](#significant-figures) +* [Adding extra labels](#adding-extra-labels) + + +## Articles + +* [How to migrate data from Prometheus](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-d44a6728f043) +* [How to migrate data from Prometheus. Filtering and modifying time series](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-filtering-and-modifying-time-series-6d40cea4bf21) + +## How to build + +1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.12. +2. Run `make build` from the root folder of the repository. + It builds `vmctl` binary and puts it into the `bin` folder. + +## Migrating data from InfluxDB (1.x) + +`vmctl` supports the `influx` mode to migrate data from InfluxDB to VictoriaMetrics time-series database. + +See `./vmctl influx --help` for details and full list of flags. + +To use migration tool please specify the InfluxDB address `--influx-addr`, the database `--influx-database` and VictoriaMetrics address `--vm-addr`. +Flag `--vm-addr` for single-node VM is usually equal to `--httpListenAddr`, and for cluster version +is equal to `--httpListenAddr` flag of VMInsert component. Please note, that vmctl performs initial readiness check for the given address +by checking `/health` endpoint. For cluster version it is additionally required to specify the `--vm-account-id` flag. +See more details for cluster version [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster). + +As soon as required flags are provided and all endpoints are accessible, `vmctl` will start the InfluxDB scheme exploration. +Basically, it just fetches all fields and timeseries from the provided database and builds up registry of all available timeseries. +Then `vmctl` sends fetch requests for each timeseries to InfluxDB one by one and pass results to VM importer. +VM importer then accumulates received samples in batches and sends import requests to VM. 
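+
+For example, a run against the cluster version might look like this (the addresses and account ID below are
+illustrative and should be adjusted to your setup; `--vm-addr` should point to the VMInsert component):
+```
+./vmctl influx --influx-addr http://localhost:8086 \
+  --influx-database benchmark \
+  --vm-addr http://<vminsert>:8480 \
+  --vm-account-id 0
+```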
+ +The importing process example for local installation of InfluxDB(`http://localhost:8086`) +and single-node VictoriaMetrics(`http://localhost:8428`): +``` +./vmctl influx --influx-database benchmark +InfluxDB import mode +2020/01/18 20:47:11 Exploring scheme for database "benchmark" +2020/01/18 20:47:11 fetching fields: command: "show field keys"; database: "benchmark"; retention: "autogen" +2020/01/18 20:47:11 found 10 fields +2020/01/18 20:47:11 fetching series: command: "show series "; database: "benchmark"; retention: "autogen" +Found 40000 timeseries to import. Continue? [Y/n] y +40000 / 40000 [-----------------------------------------------------------------------------------------------------------------------------------------------] 100.00% 21 p/s +2020/01/18 21:19:00 Import finished! +2020/01/18 21:19:00 VictoriaMetrics importer stats: + idle duration: 13m51.461434876s; + time spent while importing: 17m56.923899847s; + total samples: 345600000; + samples/s: 320914.04; + total bytes: 5.9 GB; + bytes/s: 5.4 MB; + import requests: 40001; +2020/01/18 21:19:00 Total time: 31m48.467044016s +``` + +### Data mapping + +Vmctl maps Influx data the same way as VictoriaMetrics does by using the following rules: + +* `influx-database` arg is mapped into `db` label value unless `db` tag exists in the Influx line. +* Field names are mapped to time series names prefixed with {measurement}{separator} value, +where {separator} equals to _ by default. +It can be changed with `--influx-measurement-field-separator` command-line flag. +* Field values are mapped to time series values. +* Tags are mapped to Prometheus labels format as-is. + +For example, the following Influx line: +``` +foo,tag1=value1,tag2=value2 field1=12,field2=40 +``` + +is converted into the following Prometheus format data points: +``` +foo_field1{tag1="value1", tag2="value2"} 12 +foo_field2{tag1="value1", tag2="value2"} 40 +``` + +### Configuration + +The configuration flags should contain self-explanatory descriptions. + +### Filtering + +The filtering consists of two parts: timeseries and time. +The first step of application is to select all available timeseries +for given database and retention. User may specify additional filtering +condition via `--influx-filter-series` flag. For example: +``` +./vmctl influx --influx-database benchmark \ + --influx-filter-series "on benchmark from cpu where hostname='host_1703'" +InfluxDB import mode +2020/01/26 14:23:29 Exploring scheme for database "benchmark" +2020/01/26 14:23:29 fetching fields: command: "show field keys"; database: "benchmark"; retention: "autogen" +2020/01/26 14:23:29 found 12 fields +2020/01/26 14:23:29 fetching series: command: "show series on benchmark from cpu where hostname='host_1703'"; database: "benchmark"; retention: "autogen" +Found 10 timeseries to import. Continue? [Y/n] +``` +The timeseries select query would be following: + `fetching series: command: "show series on benchmark from cpu where hostname='host_1703'"; database: "benchmark"; retention: "autogen"` + +The second step of filtering is a time filter and it applies when fetching the datapoints from Influx. 
+Time filtering may be configured with two flags:
+* --influx-filter-time-start
+* --influx-filter-time-end
+Here's an example of importing timeseries for one day only:
+`./vmctl influx --influx-database benchmark --influx-filter-series "where hostname='host_1703'" --influx-filter-time-start "2020-01-01T10:07:00Z" --influx-filter-time-end "2020-01-01T15:07:00Z"`
+
+Please see more about time filtering [here](https://docs.influxdata.com/influxdb/v1.7/query_language/schema_exploration#filter-meta-queries-by-time).
+
+## Migrating data from InfluxDB (2.x)
+
+Migrating data from InfluxDB v2.x is not supported yet ([#32](https://github.com/VictoriaMetrics/vmctl/issues/32)).
+You may find a 3rd-party solution useful for this - https://github.com/jonppe/influx_to_victoriametrics.
+
+
+## Migrating data from Prometheus
+
+`vmctl` supports the `prometheus` mode for migrating data from Prometheus to the VictoriaMetrics time-series database.
+Migration is based on reading a Prometheus snapshot, which is basically a set of hard links to the Prometheus data files.
+
+See `./vmctl prometheus --help` for details and the full list of flags.
+
+To use the migration tool please specify the path to the Prometheus snapshot `--prom-snapshot` and the VictoriaMetrics address `--vm-addr`.
+More about Prometheus snapshots may be found [here](https://www.robustperception.io/taking-snapshots-of-prometheus-data).
+Flag `--vm-addr` for single-node VM is usually equal to `--httpListenAddr`, and for the cluster version
+is equal to the `--httpListenAddr` flag of the VMInsert component. Please note that vmctl performs an initial readiness check for the given address
+by checking the `/health` endpoint. For the cluster version it is additionally required to specify the `--vm-account-id` flag.
+See more details for the cluster version [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
+
+As soon as the required flags are provided and all endpoints are accessible, `vmctl` will start the Prometheus snapshot exploration.
+Basically, it just fetches all available blocks in the provided snapshot and reads their metadata. It also does initial filtering by time
+if the flags `--prom-filter-time-start` or `--prom-filter-time-end` were set. The exploration procedure prints some stats from the read blocks.
+Please note that the stats do not take timeseries or samples filtering into account. This will be done during the importing process.
+
+The importing process takes the snapshot blocks revealed by the Explore procedure and processes them one by one,
+accumulating timeseries and samples. Please note that `vmctl` relies on the responses returned during the Explore stage,
+so ensure that the Explore queries are executed without errors or limits. Please see this
+[issue](https://github.com/VictoriaMetrics/vmctl/issues/30) for details.
+The data is processed in chunks and then sent to VM.
+
+The importing process example for a local installation of Prometheus
+and single-node VictoriaMetrics (`http://localhost:8428`):
+```
+./vmctl prometheus --prom-snapshot=/path/to/snapshot \
+ --vm-concurrency=1 \
+ --vm-batch-size=200000 \
+ --prom-concurrency=3
+Prometheus import mode
+Prometheus snapshot stats:
+ blocks found: 14;
+ blocks skipped: 0;
+ min time: 1581288163058 (2020-02-09T22:42:43Z);
+ max time: 1582409128139 (2020-02-22T22:05:28Z);
+ samples: 32549106;
+ series: 27289.
+Found 14 blocks to import. Continue? [Y/n] y
+14 / 14 [-------------------------------------------------------------------------------------------] 100.00% 0 p/s
+2020/02/23 15:50:03 Import finished!
+2020/02/23 15:50:03 VictoriaMetrics importer stats:
+ idle duration: 6.152953029s;
+ time spent while importing: 44.908522491s;
+ total samples: 32549106;
+ samples/s: 724786.84;
+ total bytes: 669.1 MB;
+ bytes/s: 14.9 MB;
+ import requests: 323;
+ import requests retries: 0;
+2020/02/23 15:50:03 Total time: 51.077451066s
+```
+
+### Data mapping
+
+VictoriaMetrics has a very similar data model to Prometheus and supports [RemoteWrite integration](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
+So no data changes will be applied.
+
+### Configuration
+
+The configuration flags should contain self-explanatory descriptions.
+
+### Filtering
+
+The filtering consists of two parts: by time and by timeseries.
+
+Filtering by time may be configured via the flags `--prom-filter-time-start` and `--prom-filter-time-end`
+in RFC3339 format. This filter is applied twice: to drop blocks that are out of range and to filter timeseries in blocks with
+an overlapping time range.
+
+Example of applying a time filter:
+```
+./vmctl prometheus --prom-snapshot=/path/to/snapshot \
+ --prom-filter-time-start=2020-02-07T00:07:01Z \
+ --prom-filter-time-end=2020-02-11T00:07:01Z
+Prometheus import mode
+Prometheus snapshot stats:
+ blocks found: 2;
+ blocks skipped: 12;
+ min time: 1581288163058 (2020-02-09T22:42:43Z);
+ max time: 1581328800000 (2020-02-10T10:00:00Z);
+ samples: 1657698;
+ series: 3930.
+Found 2 blocks to import. Continue? [Y/n] y
+```
+
+Please notice that the total amount of blocks in the provided snapshot is 14, but only 2 of them were in the provided
+time range. So the other 12 blocks were marked as `skipped`. The amount of samples and series is not taken into account,
+since this is a heavy operation and will be done during the import process.
+
+
+Filtering by timeseries is configured with the following flags:
+* `--prom-filter-label` - the label name, e.g. `__name__` or `instance`;
+* `--prom-filter-label-value` - the regular expression to filter the label value. By default it matches all (`.*`).
+
+For example:
+```
+./vmctl prometheus --prom-snapshot=/path/to/snapshot \
+ --prom-filter-label="__name__" \
+ --prom-filter-label-value="promhttp.*" \
+ --prom-filter-time-start=2020-02-07T00:07:01Z \
+ --prom-filter-time-end=2020-02-11T00:07:01Z
+Prometheus import mode
+Prometheus snapshot stats:
+ blocks found: 2;
+ blocks skipped: 12;
+ min time: 1581288163058 (2020-02-09T22:42:43Z);
+ max time: 1581328800000 (2020-02-10T10:00:00Z);
+ samples: 1657698;
+ series: 3930.
+Found 2 blocks to import. Continue? [Y/n] y
+14 / 14 [------------------------------------------------------------------------------------------------------------------------------------------------------] 100.00% ? p/s
+2020/02/23 15:51:07 Import finished!
+2020/02/23 15:51:07 VictoriaMetrics importer stats:
+ idle duration: 0s;
+ time spent while importing: 37.415461ms;
+ total samples: 10128;
+ samples/s: 270690.24;
+ total bytes: 195.2 kB;
+ bytes/s: 5.2 MB;
+ import requests: 2;
+ import requests retries: 0;
+2020/02/23 15:51:07 Total time: 7.153158218s
+```
+
+## Migrating data from Thanos
+
+Thanos uses the same storage engine as Prometheus and the data layout on-disk should be the same. That means
+`vmctl` in `prometheus` mode may be used for Thanos historical data migration as well.
+These instructions may vary based on the details of your Thanos configuration.
+Please read carefully and verify as you go. We assume you're using Thanos Sidecar on your Prometheus pods,
+and that you have a separate Thanos Store installation.
+ +### Current data + +1. For now, keep your Thanos Sidecar and Thanos-related Prometheus configuration, but add this to also stream + metrics to VictoriaMetrics: + ``` + remote_write: + - url: http://victoria-metrics:8428/api/v1/write + ``` +2. Make sure VM is running, of course. Now check the logs to make sure that Prometheus is sending and VM is receiving. + In Prometheus, make sure there are no errors. On the VM side, you should see messages like this: + ``` + 2020-04-27T18:38:46.474Z info VictoriaMetrics/lib/storage/partition.go:207 creating a partition "2020_04" with smallPartsPath="/victoria-metrics-data/data/small/2020_04", bigPartsPath="/victoria-metrics-data/data/big/2020_04" + 2020-04-27T18:38:46.506Z info VictoriaMetrics/lib/storage/partition.go:222 partition "2020_04" has been created + ``` +3. Now just wait. Within two hours, Prometheus should finish its current data file and hand it off to Thanos Store for long term + storage. + +### Historical data + +Let's assume your data is stored on S3 served by minio. You first need to copy that out to a local filesystem, +then import it into VM using `vmctl` in `prometheus` mode. +1. Copy data from minio. + 1. Run the `minio/mc` Docker container. + 1. `mc config host add minio http://minio:9000 accessKey secretKey`, substituting appropriate values for the last 3 items. + 1. `mc cp -r minio/prometheus thanos-data` +1. Import using `vmctl`. + 1. Follow the [instructions](#how-to-build) to compile `vmctl` on your machine. + 1. Use [prometheus](#migrating-data-from-prometheus) mode to import data: + ``` + vmctl prometheus --prom-snapshot thanos-data --vm-addr http://victoria-metrics:8428 + ``` + +## Migrating data from VictoriaMetrics + +### Native protocol + +The [native binary protocol](https://victoriametrics.github.io/#how-to-export-data-in-native-format) +was introduced in [1.42.0 release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.42.0) +and provides the most efficient way to migrate data between VM instances: single to single, cluster to cluster, +single to cluster and vice versa. Please note that both instances (source and destination) should be of v1.42.0 +or higher. + +See `./vmctl vm-native --help` for details and full list of flags. + +In this mode `vmctl` acts as a proxy between two VM instances, where time series filtering is done by "source" (`src`) +and processing is done by "destination" (`dst`). Because of that, `vmctl` doesn't actually know how much data will be +processed and can't show the progress bar. It will show the current processing speed and total number of processed bytes: + +``` +./vmctl vm-native --vm-native-src-addr=http://localhost:8528 \ + --vm-native-dst-addr=http://localhost:8428 \ + --vm-native-filter-match='{job="vmagent"}' \ + --vm-native-filter-time-start='2020-01-01T20:07:00Z' +VictoriaMetrics Native import mode +Initing export pipe from "http://localhost:8528" with filters: + filter: match[]={job="vmagent"} +Initing import process to "http://localhost:8428": +Total: 336.75 KiB ↖ Speed: 454.46 KiB p/s +2020/10/13 17:04:59 Total time: 952.143376ms +``` + +Importing tips: +1. Migrating all the metrics from one VM to another may collide with existing application metrics +(prefixed with `vm_`) at destination and lead to confusion when using +[official Grafana dashboards](https://grafana.com/orgs/victoriametrics/dashboards). +To avoid such situation try to filter out VM process metrics via `--vm-native-filter-match` flag. +2. 
Migration is a backfilling process, so it is recommended to read the
+[Backfilling tips](https://github.com/VictoriaMetrics/VictoriaMetrics#backfilling) section.
+3. `vmctl` doesn't provide relabeling or other types of label management in this mode.
+Instead, use [relabeling in VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl/issues/4#issuecomment-683424375).
+
+
+## Tuning
+
+### Influx mode
+
+The flag `--influx-concurrency` controls how many concurrent requests may be sent to InfluxDB while fetching
+timeseries. Please set it wisely to avoid overwhelming InfluxDB.
+
+The flag `--influx-chunk-size` controls the max amount of datapoints to return in a single chunk from fetch requests.
+Please see more details [here](https://docs.influxdata.com/influxdb/v1.7/guides/querying_data/#chunking).
+The chunk size is used to control InfluxDB memory usage, so it won't OOM when processing large timeseries with
+billions of datapoints.
+
+### Prometheus mode
+
+The flag `--prom-concurrency` controls how many concurrent readers will be reading the blocks in the snapshot.
+Since snapshots are just files on disk it would be hard to overwhelm the system. A value equal
+to the number of free CPU cores is a good starting point.
+
+### VictoriaMetrics importer
+
+The flag `--vm-concurrency` controls the number of concurrent workers that process the input from InfluxDB query results.
+Please note that each import request can load up to a single vCPU core on VictoriaMetrics. So try to set it according
+to the allocated CPU resources of your VictoriaMetrics installation.
+
+The flag `--vm-batch-size` controls the max amount of samples collected before sending the import request.
+For example, if `--influx-chunk-size=500` and `--vm-batch-size=2000` then the importer will process no more
+than 4 chunks before sending the request.
+
+### Importer stats
+
+After a successful import `vmctl` prints some statistics.
+The important numbers to watch are the following:
+- `idle duration` - shows the time the importer spent waiting for data from InfluxDB/Prometheus
+to fill up the `--vm-batch-size` batch size. The value shows the total duration across all workers configured
+via `--vm-concurrency`. A high value may be a sign of too slow InfluxDB/Prometheus fetches or a too
+high `--vm-concurrency` value. Try to improve it by increasing the `--influx-concurrency`/`--prom-concurrency` value or
+decreasing the `--vm-concurrency` value.
+- `import requests` - shows how many import requests were issued to the VM server.
+The import request is issued once the batch size (`--vm-batch-size`) is full and ready to be sent.
+Please prefer big batch sizes (50k-500k) to improve performance.
+- `import requests retries` - shows the number of unsuccessful import requests. A non-zero value may be
+a sign of network issues or of VM being overloaded. See the logs during import for error messages.
+
+### Silent mode
+
+By default `vmctl` waits for confirmation from the user before starting the import. If this is unwanted
+behavior and no user interaction is required - pass the `-s` flag to enable "silent" mode:
+```
+ -s Whether to run in silent mode. If set to true no confirmation prompts will appear. (default: false)
+```
+
+### Significant figures
+
+`vmctl` allows limiting the number of [significant figures](https://en.wikipedia.org/wiki/Significant_figures)
+before importing. For example, the average value for response size is `102.342305` bytes and it has 9 significant figures.
+If you ask a human to pronounce this value then with high probability value will be rounded to first 4 or 5 figures +because the rest aren't really that important to mention. In most cases, such a high precision is too much. +Moreover, such values may be just a result of [floating point arithmetic](https://en.wikipedia.org/wiki/Floating-point_arithmetic), +create a [false precision](https://en.wikipedia.org/wiki/False_precision) and result into bad compression ratio +according to [information theory](https://en.wikipedia.org/wiki/Information_theory). + +The `--vm-significant-figures` flag allows to limit the number of significant figures. It takes no effect if set +to 0 (by default), but set `--vm-significant-figures=5` and `102.342305` will be rounded to `102.34`. Such value will +have much higher compression ratio comparing to previous one and will save some extra disk space after the migration. +The most common case for using this flag is to reduce number of significant figures for time series storing aggregation +results such as `average`, `rate`, etc. + +### Adding extra labels + + `vmctl` allows to add extra labels to all imported series. It can be achived with flag `--vm-extra-label label=value`. + If multiple labels needs to be added, set flag for each label, for example, `--vm-extra-label label1=value1 --vm-extra-label label2=value2`. + If timeseries already have label, that must be added with `--vm-extra-label` flag, flag has priority and will override label value from timeseries. + \ No newline at end of file diff --git a/go.mod b/go.mod index 17ee10d4d..3c1b77a87 100644 --- a/go.mod +++ b/go.mod @@ -12,8 +12,12 @@ require ( github.com/VictoriaMetrics/metricsql v0.10.0 github.com/aws/aws-sdk-go v1.36.25 github.com/cespare/xxhash/v2 v2.1.1 + github.com/cheggaaa/pb/v3 v3.0.5 github.com/golang/snappy v0.0.2 + github.com/influxdata/influxdb v1.8.3 github.com/klauspost/compress v1.11.6 + github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9 + github.com/urfave/cli/v2 v2.3.0 github.com/valyala/fastjson v1.6.3 github.com/valyala/fastrand v1.0.0 github.com/valyala/fasttemplate v1.2.1 diff --git a/go.sum b/go.sum index be72c5d88..32685fc9b 100644 --- a/go.sum +++ b/go.sum @@ -1,11 +1,13 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= @@ -23,6 +25,7 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery 
v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -36,9 +39,46 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.12.0 h1:4y3gHptW1EHVtcPAVE0eBBlFuGqEejTTG3KdIE0lUX4= cloud.google.com/go/storage v1.12.0/go.mod h1:fFLk2dp2oAhDz8QFKwqrjdJvxSp/W2g7nillojlL5Ho= +collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v48.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.11.11/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb 
v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VictoriaMetrics/fastcache v1.5.7 h1:4y6y0G8PRzszQUYIQHHssv/jgPHAb5qQuuDNdCbyAgw= github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= github.com/VictoriaMetrics/fasthttp v1.0.12 h1:Ag0E119yrH4BTxVyjKD9TeiSImtG9bUcg/stItLJhSE= @@ -48,32 +88,251 @@ github.com/VictoriaMetrics/metrics v1.12.3 h1:Fe6JHC6MSEKa+BtLhPN8WIvS+HKPzMc2ev github.com/VictoriaMetrics/metrics v1.12.3/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE= github.com/VictoriaMetrics/metricsql v0.10.0 h1:45BARAP2shaL/5p67Hvz+YrWUbr0X0VCy9t+gvdIm8o= github.com/VictoriaMetrics/metricsql v0.10.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8= +github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= +github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= 
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.36.25 h1:foHwQg8LGGuR9L8IODs2co5OQqjYhNNrngefIbXbyjg= github.com/aws/aws-sdk-go v1.36.25/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod 
h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cheggaaa/pb/v3 v3.0.5 h1:lmZOti7CraK9RSjzExsY53+WWfub9Qv13B5m4ptEoPE= +github.com/cheggaaa/pb/v3 v3.0.5/go.mod h1:X1L61/+36nz9bjIsrDU52qHKOQukUQe2Ge+YvGuquCw= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/digitalocean/godo 
v1.52.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/globalsign/mgo 
v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= +github.com/go-openapi/analysis v0.19.14/go.mod h1:zN0kY6i38wo2LQOwltVyMk61bqlqOm86n1/Iszo8F8Y= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= 
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.4/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= +github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= +github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= +github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= +github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.14/go.mod h1:gwrgJS15eCUgjLpMjBJmbZezCsw88LmgeEip0M63doA= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.4/go.mod 
h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.11/go.mod h1:Uc0gKkdR+ojzsEpjh39QChyu92vPgIr72POcgHMAgSY= +github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= +github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= +github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= +github.com/go-openapi/validate v0.19.14/go.mod h1:PdGrHe0rp6MG3A1SrAY/rIHATqzJEEhohGE1atLkBEQ= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen 
v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= @@ -100,11 +359,13 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw= github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -115,6 +376,9 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -129,44 +393,350 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201117184057-ae444373da19/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gophercloud/gophercloud v0.14.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= 
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/api v1.7.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hetznercloud/hcloud-go v1.23.1/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod 
h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= +github.com/influxdata/influxdb v1.8.3 h1:WEypI1BQFTT4teLM+1qkEcvUi0dAvopAI/ir0vAiBg8= +github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= +github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= +github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= +github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= +github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= +github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= 
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.6 h1:EgWPCW6O3n1D5n99Zq3xXBt9uCwRGvpwGOusOLNBRSQ= github.com/klauspost/compress v1.11.6/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= +github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod 
h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/go-homedir 
v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter 
v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= 
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.8.0 h1:zvJNkoCFAnYFNC24FV8nW4JdRJ3GIFcLbg65lL/JDcw= +github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/procfs 
v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9 h1:F2A86PGVYqn3P7oWbrSmSlJHae9y6wwpAdoWb/pZi6Q= +github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9/go.mod h1:1MDE/bXgu4gqd5w/otko6WQpXZX9vu8QX4KbitCmaPg= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/sirupsen/logrus 
v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/urfave/cli 
v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= @@ -183,10 +753,27 @@ github.com/valyala/histogram v1.1.2/go.mod h1:CZAr6gK9dbD7hYx2s8WSPh0p5x5wETjC+2 github.com/valyala/quicktemplate v1.6.3 h1:O7EuMwuH7Q94U2CXD6sOX8AYHqQqWtmIk690IhmpkKA= github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -194,12 +781,39 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= 
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod 
h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= @@ -209,6 +823,7 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -235,16 +850,28 @@ golang.org/x/mod v0.4.0 h1:8pl+sMODzuvGJkmj2W4kZihvVb5mKm8pB/X44PIQHv8= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -280,40 +907,73 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210113000019-eaf3bda374d2 h1:F9vNgpIiamoF+Q1/c78bikg/NScXEtbZSNEpnRelOzs= @@ -327,31 +987,54 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -365,6 +1048,7 @@ golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -375,6 +1059,7 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u golang.org/x/tools v0.0.0-20200915173823-2db8f0ff891c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20200918232735-d647fc253266/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201119054027-25dc3e1ccc3c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064 h1:BmCFkEH4nJrYcAc2L08yX5RhYGD4j58PTMkEUDkpz2I= @@ -384,6 +1069,13 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -406,6 +1098,7 @@ google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ google.golang.org/api v0.36.0 h1:l2Nfbl2GPXdWorv+dT2XfinX2jOOw4zv1VhLstx+6rE= google.golang.org/api 
v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -418,6 +1111,8 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -425,6 +1120,7 @@ google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= @@ -436,6 +1132,7 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= @@ -451,10 +1148,15 @@ google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6D google.golang.org/genproto 
v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210111234610-22ae2b108f89 h1:R2owLnwrU3BdTJ5R9cnHDNsnEmBQ7n5lZjKShnbISe4= google.golang.org/genproto v0.0.0-20210111234610-22ae2b108f89/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -465,6 +1167,7 @@ google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.34.1 h1:ugq+9++ZQPFzM2pKUMCIK8gj9M0pFyuUWO9Q8kwEDQw= @@ -480,14 +1183,37 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= +gopkg.in/gcfg.v1 v1.2.3/go.mod 
h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -495,6 +1221,21 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.19.4/go.mod h1:SbtJ2aHCItirzdJ36YslycFNzWADYH3tgOhvBEFtZAk= +k8s.io/apimachinery v0.19.4/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/client-go v0.19.4/go.mod h1:ZrEy7+wj9PjH5VMBCuu/BDlvtUAku0oVFk4MmnW9mWA= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod 
h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/vendor/github.com/VividCortex/ewma/.gitignore b/vendor/github.com/VividCortex/ewma/.gitignore new file mode 100644 index 000000000..6c7104aef --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/.gitignore @@ -0,0 +1,2 @@ +.DS_Store +.*.sw? diff --git a/vendor/github.com/VividCortex/ewma/LICENSE b/vendor/github.com/VividCortex/ewma/LICENSE new file mode 100644 index 000000000..a78d643ed --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2013 VividCortex + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/VividCortex/ewma/README.md b/vendor/github.com/VividCortex/ewma/README.md new file mode 100644 index 000000000..7aab61b87 --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/README.md @@ -0,0 +1,140 @@ +# EWMA [![GoDoc](https://godoc.org/github.com/VividCortex/ewma?status.svg)](https://godoc.org/github.com/VividCortex/ewma) ![Build Status](https://circleci.com/gh/VividCortex/moving_average.png?circle-token=1459fa37f9ca0e50cef05d1963146d96d47ea523) + +This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our +Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/). + +### Exponentially Weighted Moving Average + +An exponentially weighted moving average is a way to continuously compute a type of +average for a series of numbers, as the numbers arrive. After a value in the series is +added to the average, its weight in the average decreases exponentially over time. This +biases the average towards more recent data. EWMAs are useful for several reasons, chiefly +their inexpensive computational and memory cost, as well as the fact that they represent +the recent central tendency of the series of values. + +The EWMA algorithm requires a decay factor, alpha. 
The larger the alpha, the more the average +is biased towards recent history. The alpha must be between 0 and 1, and is typically +a fairly small number, such as 0.04. We will discuss the choice of alpha later. + +The algorithm works thus, in pseudocode: + +1. Multiply the next number in the series by alpha. +2. Multiply the current value of the average by 1 minus alpha. +3. Add the result of steps 1 and 2, and store it as the new current value of the average. +4. Repeat for each number in the series. + +There are special-case behaviors for how to initialize the current value, and these vary +between implementations. One approach is to start with the first value in the series; +another is to average the first 10 or so values in the series using an arithmetic average, +and then begin the incremental updating of the average. Each method has pros and cons. + +It may help to look at it pictorially. Suppose the series has five numbers, and we choose +alpha to be 0.50 for simplicity. Here's the series, with numbers in the neighborhood of 300. + +![Data Series](https://user-images.githubusercontent.com/279875/28242350-463289a2-6977-11e7-88ca-fd778ccef1f0.png) + +Now let's take the moving average of those numbers. First we set the average to the value +of the first number. + +![EWMA Step 1](https://user-images.githubusercontent.com/279875/28242353-464c96bc-6977-11e7-9981-dc4e0789c7ba.png) + +Next we multiply the next number by alpha, multiply the current value by 1-alpha, and add +them to generate a new value. + +![EWMA Step 2](https://user-images.githubusercontent.com/279875/28242351-464abefa-6977-11e7-95d0-43900f29bef2.png) + +This continues until we are done. + +![EWMA Step N](https://user-images.githubusercontent.com/279875/28242352-464c58f0-6977-11e7-8cd0-e01e4efaac7f.png) + +Notice how each of the values in the series decays by half each time a new value +is added, and the top of the bars in the lower portion of the image represents the +size of the moving average. It is a smoothed, or low-pass, average of the original +series. + +For further reading, see [Exponentially weighted moving average](http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) on wikipedia. + +### Choosing Alpha + +Consider a fixed-size sliding-window moving average (not an exponentially weighted moving average) +that averages over the previous N samples. What is the average age of each sample? It is N/2. + +Now suppose that you wish to construct a EWMA whose samples have the same average age. The formula +to compute the alpha required for this is: alpha = 2/(N+1). Proof is in the book +"Production and Operations Analysis" by Steven Nahmias. + +So, for example, if you have a time-series with samples once per second, and you want to get the +moving average over the previous minute, you should use an alpha of .032786885. This, by the way, +is the constant alpha used for this repository's SimpleEWMA. + +### Implementations + +This repository contains two implementations of the EWMA algorithm, with different properties. + +The implementations all conform to the MovingAverage interface, and the constructor returns +that type. + +Current implementations assume an implicit time interval of 1.0 between every sample added. +That is, the passage of time is treated as though it's the same as the arrival of samples. +If you need time-based decay when samples are not arriving precisely at set intervals, then +this package will not support your needs at present. 
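
To make the pseudocode and the alpha formula above concrete, here is a minimal, self-contained Go sketch. It is not part of this package, and the helper names are purely illustrative; it seeds the average with the first sample and then applies `new = alpha*x + (1-alpha)*old`, using `alpha = 2/(N+1)` for the one-minute example above.

```go
package main

import "fmt"

// alphaForWindow returns the decay factor whose samples have the same average
// age as a sliding window over n samples, i.e. alpha = 2 / (n + 1), as derived above.
func alphaForWindow(n float64) float64 {
	return 2 / (n + 1)
}

// update applies one EWMA step: new = alpha*x + (1-alpha)*old.
func update(old, x, alpha float64) float64 {
	return alpha*x + (1-alpha)*old
}

func main() {
	alpha := alphaForWindow(60) // ~0.032786885, the one-minute example above
	var avg float64
	initialized := false
	for _, x := range []float64{300, 310, 290, 305, 295} {
		if !initialized {
			avg, initialized = x, true // seed with the first value in the series
			continue
		}
		avg = update(avg, x, alpha)
	}
	fmt.Printf("alpha=%.9f avg=%.2f\n", alpha, avg)
}
```

With an alpha this small, the handful of samples near 300 barely moves the average away from the seed value, which is the smoothing behaviour described above.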
+ +#### SimpleEWMA + +A SimpleEWMA is designed for low CPU and memory consumption. It **will** have different behavior than the VariableEWMA +for multiple reasons. It has no warm-up period and it uses a constant +decay. These properties let it use less memory. It will also behave +differently when it's equal to zero, which is assumed to mean +uninitialized, so if a value is likely to actually become zero over time, +then any non-zero value will cause a sharp jump instead of a small change. + +#### VariableEWMA + +Unlike SimpleEWMA, this supports a custom age which must be stored, and thus uses more memory. +It also has a "warmup" time when you start adding values to it. It will report a value of 0.0 +until you have added the required number of samples to it. It uses some memory to store the +number of samples added to it. As a result it uses a little over twice the memory of SimpleEWMA. + +## Usage + +### API Documentation + +View the GoDoc generated documentation [here](http://godoc.org/github.com/VividCortex/ewma). + +```go +package main +import "github.com/VividCortex/ewma" + +func main() { + samples := [100]float64{ + 4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149, + } + + e := ewma.NewMovingAverage() //=> Returns a SimpleEWMA if called without params + a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1) + + for _, f := range samples { + e.Add(f) + a.Add(f) + } + + e.Value() //=> 13.577404704631077 + a.Value() //=> 1.5806140565521463e-12 +} +``` + +## Contributing + +We only accept pull requests for minor fixes or improvements. This includes: + +* Small bug fixes +* Typos +* Documentation or comments + +Please open issues to discuss new features. Pull requests for new features will be rejected, +so we recommend forking the repository and making changes in your fork for your use case. + +## License + +This repository is Copyright (c) 2013 VividCortex, Inc. All rights reserved. +It is licensed under the MIT license. Please see the LICENSE file for applicable license terms. diff --git a/vendor/github.com/VividCortex/ewma/ewma.go b/vendor/github.com/VividCortex/ewma/ewma.go new file mode 100644 index 000000000..44d5d53e3 --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/ewma.go @@ -0,0 +1,126 @@ +// Package ewma implements exponentially weighted moving averages. +package ewma + +// Copyright (c) 2013 VividCortex, Inc. All rights reserved. +// Please see the LICENSE file for applicable license terms. + +const ( + // By default, we average over a one-minute period, which means the average + // age of the metrics in the period is 30 seconds. + AVG_METRIC_AGE float64 = 30.0 + + // The formula for computing the decay factor from the average age comes + // from "Production and Operations Analysis" by Steven Nahmias. + DECAY float64 = 2 / (float64(AVG_METRIC_AGE) + 1) + + // For best results, the moving average should not be initialized to the + // samples it sees immediately. The book "Production and Operations + // Analysis" by Steven Nahmias suggests initializing the moving average to + // the mean of the first 10 samples. Until the VariableEwma has seen this + // many samples, it is not "ready" to be queried for the value of the + // moving average. This adds some memory cost. + WARMUP_SAMPLES uint8 = 10 +) + +// MovingAverage is the interface that computes a moving average over a time- +// series stream of numbers. The average may be over a window or exponentially +// decaying. 
+type MovingAverage interface { + Add(float64) + Value() float64 + Set(float64) +} + +// NewMovingAverage constructs a MovingAverage that computes an average with the +// desired characteristics in the moving window or exponential decay. If no +// age is given, it constructs a default exponentially weighted implementation +// that consumes minimal memory. The age is related to the decay factor alpha +// by the formula given for the DECAY constant. It signifies the average age +// of the samples as time goes to infinity. +func NewMovingAverage(age ...float64) MovingAverage { + if len(age) == 0 || age[0] == AVG_METRIC_AGE { + return new(SimpleEWMA) + } + return &VariableEWMA{ + decay: 2 / (age[0] + 1), + } +} + +// A SimpleEWMA represents the exponentially weighted moving average of a +// series of numbers. It WILL have different behavior than the VariableEWMA +// for multiple reasons. It has no warm-up period and it uses a constant +// decay. These properties let it use less memory. It will also behave +// differently when it's equal to zero, which is assumed to mean +// uninitialized, so if a value is likely to actually become zero over time, +// then any non-zero value will cause a sharp jump instead of a small change. +// However, note that this takes a long time, and the value may just +// decays to a stable value that's close to zero, but which won't be mistaken +// for uninitialized. See http://play.golang.org/p/litxBDr_RC for example. +type SimpleEWMA struct { + // The current value of the average. After adding with Add(), this is + // updated to reflect the average of all values seen thus far. + value float64 +} + +// Add adds a value to the series and updates the moving average. +func (e *SimpleEWMA) Add(value float64) { + if e.value == 0 { // this is a proxy for "uninitialized" + e.value = value + } else { + e.value = (value * DECAY) + (e.value * (1 - DECAY)) + } +} + +// Value returns the current value of the moving average. +func (e *SimpleEWMA) Value() float64 { + return e.value +} + +// Set sets the EWMA's value. +func (e *SimpleEWMA) Set(value float64) { + e.value = value +} + +// VariableEWMA represents the exponentially weighted moving average of a series of +// numbers. Unlike SimpleEWMA, it supports a custom age, and thus uses more memory. +type VariableEWMA struct { + // The multiplier factor by which the previous samples decay. + decay float64 + // The current value of the average. + value float64 + // The number of samples added to this instance. + count uint8 +} + +// Add adds a value to the series and updates the moving average. +func (e *VariableEWMA) Add(value float64) { + switch { + case e.count < WARMUP_SAMPLES: + e.count++ + e.value += value + case e.count == WARMUP_SAMPLES: + e.count++ + e.value = e.value / float64(WARMUP_SAMPLES) + e.value = (value * e.decay) + (e.value * (1 - e.decay)) + default: + e.value = (value * e.decay) + (e.value * (1 - e.decay)) + } +} + +// Value returns the current value of the average, or 0.0 if the series hasn't +// warmed up yet. +func (e *VariableEWMA) Value() float64 { + if e.count <= WARMUP_SAMPLES { + return 0.0 + } + + return e.value +} + +// Set sets the EWMA's value. 
+func (e *VariableEWMA) Set(value float64) { + e.value = value + if e.count <= WARMUP_SAMPLES { + e.count = WARMUP_SAMPLES + 1 + } +} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 000000000..339177be6 --- /dev/null +++ b/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt new file mode 100644 index 000000000..1602287d7 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt @@ -0,0 +1,2388 @@ +8 +5 +26 +12 +5 +235 +13 +6 +28 +30 +3 +3 +3 +3 +5 +2 +33 +7 +2 +4 +7 +12 +14 +5 +8 +3 +10 +4 +5 +3 +6 +6 +209 +20 +3 +10 +14 +3 +4 +6 +8 +5 +11 +7 +3 +2 +3 +3 +212 +5 +222 +4 +10 +10 +5 +6 +3 +8 +3 +10 +254 +220 +2 +3 +5 +24 +5 +4 +222 +7 +3 +3 +223 +8 +15 +12 +14 +14 +3 +2 +2 +3 +13 +3 +11 +4 +4 +6 +5 +7 +13 +5 +3 +5 +2 +5 +3 +5 +2 +7 +15 +17 +14 +3 +6 +6 +3 +17 +5 +4 +7 +6 +4 +4 +8 +6 +8 +3 +9 +3 +6 +3 +4 +5 +3 +3 +660 +4 +6 +10 +3 +6 +3 +2 +5 +13 +2 +4 +4 +10 +4 +8 +4 +3 +7 +9 +9 +3 +10 +37 +3 +13 +4 +12 +3 +6 +10 +8 +5 +21 +2 +3 +8 +3 +2 +3 +3 +4 +12 +2 +4 +8 +8 +4 +3 +2 +20 +1 +6 +32 +2 +11 +6 +18 +3 +8 +11 +3 +212 +3 +4 +2 +6 +7 +12 +11 +3 +2 +16 +10 +6 +4 +6 +3 +2 +7 +3 +2 +2 +2 +2 +5 +6 +4 +3 +10 +3 +4 +6 +5 +3 +4 +4 +5 +6 +4 +3 +4 +4 +5 +7 +5 +5 +3 +2 +7 +2 +4 +12 +4 +5 +6 +2 +4 +4 +8 +4 +15 +13 +7 +16 +5 +3 +23 +5 +5 +7 +3 +2 +9 +8 +7 +5 +8 +11 +4 +10 +76 +4 +47 +4 +3 +2 +7 +4 +2 +3 +37 +10 +4 +2 +20 +5 +4 +4 +10 +10 +4 +3 +7 +23 +240 +7 +13 +5 +5 +3 +3 +2 +5 +4 +2 +8 +7 +19 +2 +23 +8 +7 +2 +5 +3 +8 +3 +8 +13 +5 +5 +5 +2 +3 +23 +4 +9 +8 +4 +3 +3 +5 +220 +2 +3 +4 +6 +14 +3 +53 +6 +2 +5 +18 +6 +3 +219 +6 +5 +2 +5 +3 +6 +5 +15 +4 +3 +17 +3 +2 +4 +7 +2 +3 +3 +4 +4 +3 +2 +664 +6 +3 +23 +5 +5 +16 +5 +8 +2 +4 +2 +24 +12 +3 +2 +3 +5 +8 +3 +5 +4 +3 +14 +3 +5 +8 +2 +3 +7 +9 +4 +2 +3 +6 +8 +4 +3 +4 +6 +5 +3 +3 +6 +3 +19 +4 +4 +6 +3 +6 +3 +5 +22 +5 +4 +4 +3 +8 +11 +4 +9 +7 +6 +13 +4 +4 +4 +6 +17 +9 +3 +3 +3 +4 +3 +221 +5 +11 +3 +4 +2 +12 +6 +3 +5 +7 +5 +7 +4 +9 +7 +14 +37 +19 +217 +16 +3 +5 +2 +2 +7 +19 +7 +6 +7 +4 +24 +5 +11 +4 +7 +7 +9 +13 +3 +4 +3 +6 +28 +4 +4 +5 +5 +2 +5 +6 +4 +4 +6 +10 +5 +4 +3 +2 +3 +3 +6 +5 +5 +4 +3 +2 +3 +7 +4 +6 +18 +16 +8 +16 +4 +5 +8 +6 +9 +13 +1545 +6 +215 +6 +5 +6 +3 +45 +31 +5 +2 +2 +4 +3 +3 +2 +5 +4 +3 +5 +7 +7 +4 +5 +8 +5 +4 +749 +2 +31 +9 +11 +2 +11 +5 +4 +4 +7 +9 +11 +4 +5 +4 +7 +3 +4 +6 
+2 +15 +3 +4 +3 +4 +3 +5 +2 +13 +5 +5 +3 +3 +23 +4 +4 +5 +7 +4 +13 +2 +4 +3 +4 +2 +6 +2 +7 +3 +5 +5 +3 +29 +5 +4 +4 +3 +10 +2 +3 +79 +16 +6 +6 +7 +7 +3 +5 +5 +7 +4 +3 +7 +9 +5 +6 +5 +9 +6 +3 +6 +4 +17 +2 +10 +9 +3 +6 +2 +3 +21 +22 +5 +11 +4 +2 +17 +2 +224 +2 +14 +3 +4 +4 +2 +4 +4 +4 +4 +5 +3 +4 +4 +10 +2 +6 +3 +3 +5 +7 +2 +7 +5 +6 +3 +218 +2 +2 +5 +2 +6 +3 +5 +222 +14 +6 +33 +3 +2 +5 +3 +3 +3 +9 +5 +3 +3 +2 +7 +4 +3 +4 +3 +5 +6 +5 +26 +4 +13 +9 +7 +3 +221 +3 +3 +4 +4 +4 +4 +2 +18 +5 +3 +7 +9 +6 +8 +3 +10 +3 +11 +9 +5 +4 +17 +5 +5 +6 +6 +3 +2 +4 +12 +17 +6 +7 +218 +4 +2 +4 +10 +3 +5 +15 +3 +9 +4 +3 +3 +6 +29 +3 +3 +4 +5 +5 +3 +8 +5 +6 +6 +7 +5 +3 +5 +3 +29 +2 +31 +5 +15 +24 +16 +5 +207 +4 +3 +3 +2 +15 +4 +4 +13 +5 +5 +4 +6 +10 +2 +7 +8 +4 +6 +20 +5 +3 +4 +3 +12 +12 +5 +17 +7 +3 +3 +3 +6 +10 +3 +5 +25 +80 +4 +9 +3 +2 +11 +3 +3 +2 +3 +8 +7 +5 +5 +19 +5 +3 +3 +12 +11 +2 +6 +5 +5 +5 +3 +3 +3 +4 +209 +14 +3 +2 +5 +19 +4 +4 +3 +4 +14 +5 +6 +4 +13 +9 +7 +4 +7 +10 +2 +9 +5 +7 +2 +8 +4 +6 +5 +5 +222 +8 +7 +12 +5 +216 +3 +4 +4 +6 +3 +14 +8 +7 +13 +4 +3 +3 +3 +3 +17 +5 +4 +3 +33 +6 +6 +33 +7 +5 +3 +8 +7 +5 +2 +9 +4 +2 +233 +24 +7 +4 +8 +10 +3 +4 +15 +2 +16 +3 +3 +13 +12 +7 +5 +4 +207 +4 +2 +4 +27 +15 +2 +5 +2 +25 +6 +5 +5 +6 +13 +6 +18 +6 +4 +12 +225 +10 +7 +5 +2 +2 +11 +4 +14 +21 +8 +10 +3 +5 +4 +232 +2 +5 +5 +3 +7 +17 +11 +6 +6 +23 +4 +6 +3 +5 +4 +2 +17 +3 +6 +5 +8 +3 +2 +2 +14 +9 +4 +4 +2 +5 +5 +3 +7 +6 +12 +6 +10 +3 +6 +2 +2 +19 +5 +4 +4 +9 +2 +4 +13 +3 +5 +6 +3 +6 +5 +4 +9 +6 +3 +5 +7 +3 +6 +6 +4 +3 +10 +6 +3 +221 +3 +5 +3 +6 +4 +8 +5 +3 +6 +4 +4 +2 +54 +5 +6 +11 +3 +3 +4 +4 +4 +3 +7 +3 +11 +11 +7 +10 +6 +13 +223 +213 +15 +231 +7 +3 +7 +228 +2 +3 +4 +4 +5 +6 +7 +4 +13 +3 +4 +5 +3 +6 +4 +6 +7 +2 +4 +3 +4 +3 +3 +6 +3 +7 +3 +5 +18 +5 +6 +8 +10 +3 +3 +3 +2 +4 +2 +4 +4 +5 +6 +6 +4 +10 +13 +3 +12 +5 +12 +16 +8 +4 +19 +11 +2 +4 +5 +6 +8 +5 +6 +4 +18 +10 +4 +2 +216 +6 +6 +6 +2 +4 +12 +8 +3 +11 +5 +6 +14 +5 +3 +13 +4 +5 +4 +5 +3 +28 +6 +3 +7 +219 +3 +9 +7 +3 +10 +6 +3 +4 +19 +5 +7 +11 +6 +15 +19 +4 +13 +11 +3 +7 +5 +10 +2 +8 +11 +2 +6 +4 +6 +24 +6 +3 +3 +3 +3 +6 +18 +4 +11 +4 +2 +5 +10 +8 +3 +9 +5 +3 +4 +5 +6 +2 +5 +7 +4 +4 +14 +6 +4 +4 +5 +5 +7 +2 +4 +3 +7 +3 +3 +6 +4 +5 +4 +4 +4 +3 +3 +3 +3 +8 +14 +2 +3 +5 +3 +2 +4 +5 +3 +7 +3 +3 +18 +3 +4 +4 +5 +7 +3 +3 +3 +13 +5 +4 +8 +211 +5 +5 +3 +5 +2 +5 +4 +2 +655 +6 +3 +5 +11 +2 +5 +3 +12 +9 +15 +11 +5 +12 +217 +2 +6 +17 +3 +3 +207 +5 +5 +4 +5 +9 +3 +2 +8 +5 +4 +3 +2 +5 +12 +4 +14 +5 +4 +2 +13 +5 +8 +4 +225 +4 +3 +4 +5 +4 +3 +3 +6 +23 +9 +2 +6 +7 +233 +4 +4 +6 +18 +3 +4 +6 +3 +4 +4 +2 +3 +7 +4 +13 +227 +4 +3 +5 +4 +2 +12 +9 +17 +3 +7 +14 +6 +4 +5 +21 +4 +8 +9 +2 +9 +25 +16 +3 +6 +4 +7 +8 +5 +2 +3 +5 +4 +3 +3 +5 +3 +3 +3 +2 +3 +19 +2 +4 +3 +4 +2 +3 +4 +4 +2 +4 +3 +3 +3 +2 +6 +3 +17 +5 +6 +4 +3 +13 +5 +3 +3 +3 +4 +9 +4 +2 +14 +12 +4 +5 +24 +4 +3 +37 +12 +11 +21 +3 +4 +3 +13 +4 +2 +3 +15 +4 +11 +4 +4 +3 +8 +3 +4 +4 +12 +8 +5 +3 +3 +4 +2 +220 +3 +5 +223 +3 +3 +3 +10 +3 +15 +4 +241 +9 +7 +3 +6 +6 +23 +4 +13 +7 +3 +4 +7 +4 +9 +3 +3 +4 +10 +5 +5 +1 +5 +24 +2 +4 +5 +5 +6 +14 +3 +8 +2 +3 +5 +13 +13 +3 +5 +2 +3 +15 +3 +4 +2 +10 +4 +4 +4 +5 +5 +3 +5 +3 +4 +7 +4 +27 +3 +6 +4 +15 +3 +5 +6 +6 +5 +4 +8 +3 +9 +2 +6 +3 +4 +3 +7 +4 +18 +3 +11 +3 +3 +8 +9 +7 +24 +3 +219 +7 +10 +4 +5 +9 +12 +2 +5 +4 +4 +4 +3 +3 +19 +5 +8 +16 +8 +6 +22 +3 +23 +3 +242 +9 +4 +3 +3 +5 +7 +3 +3 +5 +8 +3 +7 +5 +14 +8 +10 +3 +4 +3 +7 +4 +6 +7 +4 +10 +4 +3 +11 +3 +7 +10 +3 +13 +6 +8 +12 +10 +5 +7 +9 +3 +4 +7 +7 +10 +8 +30 +9 +19 +4 +3 +19 +15 +4 +13 +3 +215 +223 +4 +7 +4 +8 +17 +16 +3 +7 +6 +5 +5 +4 +12 +3 
+7 +4 +4 +13 +4 +5 +2 +5 +6 +5 +6 +6 +7 +10 +18 +23 +9 +3 +3 +6 +5 +2 +4 +2 +7 +3 +3 +2 +5 +5 +14 +10 +224 +6 +3 +4 +3 +7 +5 +9 +3 +6 +4 +2 +5 +11 +4 +3 +3 +2 +8 +4 +7 +4 +10 +7 +3 +3 +18 +18 +17 +3 +3 +3 +4 +5 +3 +3 +4 +12 +7 +3 +11 +13 +5 +4 +7 +13 +5 +4 +11 +3 +12 +3 +6 +4 +4 +21 +4 +6 +9 +5 +3 +10 +8 +4 +6 +4 +4 +6 +5 +4 +8 +6 +4 +6 +4 +4 +5 +9 +6 +3 +4 +2 +9 +3 +18 +2 +4 +3 +13 +3 +6 +6 +8 +7 +9 +3 +2 +16 +3 +4 +6 +3 +2 +33 +22 +14 +4 +9 +12 +4 +5 +6 +3 +23 +9 +4 +3 +5 +5 +3 +4 +5 +3 +5 +3 +10 +4 +5 +5 +8 +4 +4 +6 +8 +5 +4 +3 +4 +6 +3 +3 +3 +5 +9 +12 +6 +5 +9 +3 +5 +3 +2 +2 +2 +18 +3 +2 +21 +2 +5 +4 +6 +4 +5 +10 +3 +9 +3 +2 +10 +7 +3 +6 +6 +4 +4 +8 +12 +7 +3 +7 +3 +3 +9 +3 +4 +5 +4 +4 +5 +5 +10 +15 +4 +4 +14 +6 +227 +3 +14 +5 +216 +22 +5 +4 +2 +2 +6 +3 +4 +2 +9 +9 +4 +3 +28 +13 +11 +4 +5 +3 +3 +2 +3 +3 +5 +3 +4 +3 +5 +23 +26 +3 +4 +5 +6 +4 +6 +3 +5 +5 +3 +4 +3 +2 +2 +2 +7 +14 +3 +6 +7 +17 +2 +2 +15 +14 +16 +4 +6 +7 +13 +6 +4 +5 +6 +16 +3 +3 +28 +3 +6 +15 +3 +9 +2 +4 +6 +3 +3 +22 +4 +12 +6 +7 +2 +5 +4 +10 +3 +16 +6 +9 +2 +5 +12 +7 +5 +5 +5 +5 +2 +11 +9 +17 +4 +3 +11 +7 +3 +5 +15 +4 +3 +4 +211 +8 +7 +5 +4 +7 +6 +7 +6 +3 +6 +5 +6 +5 +3 +4 +4 +26 +4 +6 +10 +4 +4 +3 +2 +3 +3 +4 +5 +9 +3 +9 +4 +4 +5 +5 +8 +2 +4 +2 +3 +8 +4 +11 +19 +5 +8 +6 +3 +5 +6 +12 +3 +2 +4 +16 +12 +3 +4 +4 +8 +6 +5 +6 +6 +219 +8 +222 +6 +16 +3 +13 +19 +5 +4 +3 +11 +6 +10 +4 +7 +7 +12 +5 +3 +3 +5 +6 +10 +3 +8 +2 +5 +4 +7 +2 +4 +4 +2 +12 +9 +6 +4 +2 +40 +2 +4 +10 +4 +223 +4 +2 +20 +6 +7 +24 +5 +4 +5 +2 +20 +16 +6 +5 +13 +2 +3 +3 +19 +3 +2 +4 +5 +6 +7 +11 +12 +5 +6 +7 +7 +3 +5 +3 +5 +3 +14 +3 +4 +4 +2 +11 +1 +7 +3 +9 +6 +11 +12 +5 +8 +6 +221 +4 +2 +12 +4 +3 +15 +4 +5 +226 +7 +218 +7 +5 +4 +5 +18 +4 +5 +9 +4 +4 +2 +9 +18 +18 +9 +5 +6 +6 +3 +3 +7 +3 +5 +4 +4 +4 +12 +3 +6 +31 +5 +4 +7 +3 +6 +5 +6 +5 +11 +2 +2 +11 +11 +6 +7 +5 +8 +7 +10 +5 +23 +7 +4 +3 +5 +34 +2 +5 +23 +7 +3 +6 +8 +4 +4 +4 +2 +5 +3 +8 +5 +4 +8 +25 +2 +3 +17 +8 +3 +4 +8 +7 +3 +15 +6 +5 +7 +21 +9 +5 +6 +6 +5 +3 +2 +3 +10 +3 +6 +3 +14 +7 +4 +4 +8 +7 +8 +2 +6 +12 +4 +213 +6 +5 +21 +8 +2 +5 +23 +3 +11 +2 +3 +6 +25 +2 +3 +6 +7 +6 +6 +4 +4 +6 +3 +17 +9 +7 +6 +4 +3 +10 +7 +2 +3 +3 +3 +11 +8 +3 +7 +6 +4 +14 +36 +3 +4 +3 +3 +22 +13 +21 +4 +2 +7 +4 +4 +17 +15 +3 +7 +11 +2 +4 +7 +6 +209 +6 +3 +2 +2 +24 +4 +9 +4 +3 +3 +3 +29 +2 +2 +4 +3 +3 +5 +4 +6 +3 +3 +2 +4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 000000000..d7d14f8eb --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,316 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. 
+type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile + } else { + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. 
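For reference, a short sketch of how the constructors above are typically used: feed values into a targeted stream and query the quantiles it was built for (the target set and the synthetic distribution are arbitrary choices, not anything prescribed by the package):

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Quantiles of interest mapped to their tolerated absolute errors, supplied a priori.
	targets := map[float64]float64{0.50: 0.05, 0.90: 0.01, 0.99: 0.001}
	s := quantile.NewTargeted(targets)

	for i := 0; i < 100000; i++ {
		s.Insert(rand.NormFloat64()*10 + 100) // synthetic "latency" samples around 100
	}

	for _, q := range []float64{0.50, 0.90, 0.99} {
		fmt.Printf("q%.2f ~= %.2f\n", q, s.Query(q))
	}
	fmt.Println("samples observed:", s.Count())
}
```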
+func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(math.Ceil(float64(l) * q)) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. +func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? 
+ } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/cheggaaa/pb/v3/LICENSE b/vendor/github.com/cheggaaa/pb/v3/LICENSE new file mode 100644 index 000000000..511970333 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/LICENSE @@ -0,0 +1,12 @@ +Copyright (c) 2012-2015, Sergey Cherepanov +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/vendor/github.com/cheggaaa/pb/v3/element.go b/vendor/github.com/cheggaaa/pb/v3/element.go new file mode 100644 index 000000000..965183fe7 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/element.go @@ -0,0 +1,290 @@ +package pb + +import ( + "bytes" + "fmt" + "math" + "sync" + "time" +) + +const ( + adElPlaceholder = "%_ad_el_%" + adElPlaceholderLen = len(adElPlaceholder) +) + +var ( + defaultBarEls = [5]string{"[", "-", ">", "_", "]"} +) + +// Element is an interface for bar elements +type Element interface { + ProgressElement(state *State, args ...string) string +} + +// ElementFunc type implements Element interface and created for simplify elements +type ElementFunc func(state *State, args ...string) string + +// ProgressElement just call self func +func (e ElementFunc) ProgressElement(state *State, args ...string) string { + return e(state, args...) +} + +var elementsM sync.Mutex + +var elements = map[string]Element{ + "percent": ElementPercent, + "counters": ElementCounters, + "bar": adaptiveWrap(ElementBar), + "speed": ElementSpeed, + "rtime": ElementRemainingTime, + "etime": ElementElapsedTime, + "string": ElementString, + "cycle": ElementCycle, +} + +// RegisterElement give you a chance to use custom elements +func RegisterElement(name string, el Element, adaptive bool) { + if adaptive { + el = adaptiveWrap(el) + } + elementsM.Lock() + elements[name] = el + elementsM.Unlock() +} + +type argsHelper []string + +func (args argsHelper) getOr(n int, value string) string { + if len(args) > n { + return args[n] + } + return value +} + +func (args argsHelper) getNotEmptyOr(n int, value string) (v string) { + if v = args.getOr(n, value); v == "" { + return value + } + return +} + +func adaptiveWrap(el Element) Element { + return ElementFunc(func(state *State, args ...string) string { + state.recalc = append(state.recalc, ElementFunc(func(s *State, _ ...string) (result string) { + s.adaptive = true + result = el.ProgressElement(s, args...) + s.adaptive = false + return + })) + return adElPlaceholder + }) +} + +// ElementPercent shows current percent of progress. +// Optionally can take one or two string arguments. +// First string will be used as value for format float64, default is "%.02f%%". +// Second string will be used when percent can't be calculated, default is "?%" +// In template use as follows: {{percent .}} or {{percent . "%.03f%%"}} or {{percent . "%.03f%%" "?"}} +var ElementPercent ElementFunc = func(state *State, args ...string) string { + argsh := argsHelper(args) + if state.Total() > 0 { + return fmt.Sprintf( + argsh.getNotEmptyOr(0, "%.02f%%"), + float64(state.Value())/(float64(state.Total())/float64(100)), + ) + } + return argsh.getOr(1, "?%") +} + +// ElementCounters shows current and total values. +// Optionally can take one or two string arguments. +// First string will be used as format value when Total is present (>0). Default is "%s / %s" +// Second string will be used when total <= 0. Default is "%[1]s" +// In template use as follows: {{counters .}} or {{counters . "%s/%s"}} or {{counters . 
"%s/%s" "%s/?"}} +var ElementCounters ElementFunc = func(state *State, args ...string) string { + var f string + if state.Total() > 0 { + f = argsHelper(args).getNotEmptyOr(0, "%s / %s") + } else { + f = argsHelper(args).getNotEmptyOr(1, "%[1]s") + } + return fmt.Sprintf(f, state.Format(state.Value()), state.Format(state.Total())) +} + +type elementKey int + +const ( + barObj elementKey = iota + speedObj + cycleObj +) + +type bar struct { + eb [5][]byte // elements in bytes + cc [5]int // cell counts + buf *bytes.Buffer +} + +func (p *bar) write(state *State, eln, width int) int { + repeat := width / p.cc[eln] + for i := 0; i < repeat; i++ { + p.buf.Write(p.eb[eln]) + } + StripStringToBuffer(string(p.eb[eln]), width%p.cc[eln], p.buf) + return width +} + +func getProgressObj(state *State, args ...string) (p *bar) { + var ok bool + if p, ok = state.Get(barObj).(*bar); !ok { + p = &bar{ + buf: bytes.NewBuffer(nil), + } + state.Set(barObj, p) + } + argsH := argsHelper(args) + for i := range p.eb { + arg := argsH.getNotEmptyOr(i, defaultBarEls[i]) + if string(p.eb[i]) != arg { + p.cc[i] = CellCount(arg) + p.eb[i] = []byte(arg) + if p.cc[i] == 0 { + p.cc[i] = 1 + p.eb[i] = []byte(" ") + } + } + } + return +} + +// ElementBar make progress bar view [-->__] +// Optionally can take up to 5 string arguments. Defaults is "[", "-", ">", "_", "]" +// In template use as follows: {{bar . }} or {{bar . "<" "oOo" "|" "~" ">"}} +// Color args: {{bar . (red "[") (green "-") ... +var ElementBar ElementFunc = func(state *State, args ...string) string { + // init + var p = getProgressObj(state, args...) + + total, value := state.Total(), state.Value() + if total < 0 { + total = -total + } + if value < 0 { + value = -value + } + + // check for overflow + if total != 0 && value > total { + total = value + } + + p.buf.Reset() + + var widthLeft = state.AdaptiveElWidth() + if widthLeft <= 0 || !state.IsAdaptiveWidth() { + widthLeft = 30 + } + + // write left border + if p.cc[0] < widthLeft { + widthLeft -= p.write(state, 0, p.cc[0]) + } else { + p.write(state, 0, widthLeft) + return p.buf.String() + } + + // check right border size + if p.cc[4] < widthLeft { + // write later + widthLeft -= p.cc[4] + } else { + p.write(state, 4, widthLeft) + return p.buf.String() + } + + var curCount int + + if total > 0 { + // calculate count of currenct space + curCount = int(math.Ceil((float64(value) / float64(total)) * float64(widthLeft))) + } + + // write bar + if total == value && state.IsFinished() { + widthLeft -= p.write(state, 1, curCount) + } else if toWrite := curCount - p.cc[2]; toWrite > 0 { + widthLeft -= p.write(state, 1, toWrite) + widthLeft -= p.write(state, 2, p.cc[2]) + } else if curCount > 0 { + widthLeft -= p.write(state, 2, curCount) + } + if widthLeft > 0 { + widthLeft -= p.write(state, 3, widthLeft) + } + // write right border + p.write(state, 4, p.cc[4]) + // cut result and return string + return p.buf.String() +} + +// ElementRemainingTime calculates remaining time based on speed (EWMA) +// Optionally can take one or two string arguments. +// First string will be used as value for format time duration string, default is "%s". +// Second string will be used when bar finished and value indicates elapsed time, default is "%s" +// Third string will be used when value not available, default is "?" +// In template use as follows: {{rtime .}} or {{rtime . "%s remain"}} or {{rtime . 
"%s remain" "%s total" "???"}} +var ElementRemainingTime ElementFunc = func(state *State, args ...string) string { + var rts string + sp := getSpeedObj(state).value(state) + if !state.IsFinished() { + if sp > 0 { + remain := float64(state.Total() - state.Value()) + remainDur := time.Duration(remain/sp) * time.Second + rts = remainDur.String() + } else { + return argsHelper(args).getOr(2, "?") + } + } else { + rts = state.Time().Truncate(time.Second).Sub(state.StartTime().Truncate(time.Second)).String() + return fmt.Sprintf(argsHelper(args).getOr(1, "%s"), rts) + } + return fmt.Sprintf(argsHelper(args).getOr(0, "%s"), rts) +} + +// ElementElapsedTime shows elapsed time +// Optionally cat take one argument - it's format for time string. +// In template use as follows: {{etime .}} or {{etime . "%s elapsed"}} +var ElementElapsedTime ElementFunc = func(state *State, args ...string) string { + etm := state.Time().Truncate(time.Second).Sub(state.StartTime().Truncate(time.Second)) + return fmt.Sprintf(argsHelper(args).getOr(0, "%s"), etm.String()) +} + +// ElementString get value from bar by given key and print them +// bar.Set("myKey", "string to print") +// In template use as follows: {{string . "myKey"}} +var ElementString ElementFunc = func(state *State, args ...string) string { + if len(args) == 0 { + return "" + } + v := state.Get(args[0]) + if v == nil { + return "" + } + return fmt.Sprint(v) +} + +// ElementCycle return next argument for every call +// In template use as follows: {{cycle . "1" "2" "3"}} +// Or mix width other elements: {{ bar . "" "" (cycle . "↖" "↗" "↘" "↙" )}} +var ElementCycle ElementFunc = func(state *State, args ...string) string { + if len(args) == 0 { + return "" + } + n, _ := state.Get(cycleObj).(int) + if n >= len(args) { + n = 0 + } + state.Set(cycleObj, n+1) + return args[n] +} diff --git a/vendor/github.com/cheggaaa/pb/v3/go.mod b/vendor/github.com/cheggaaa/pb/v3/go.mod new file mode 100644 index 000000000..666c86bc6 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/go.mod @@ -0,0 +1,11 @@ +module github.com/cheggaaa/pb/v3 + +require ( + github.com/VividCortex/ewma v1.1.1 + github.com/fatih/color v1.7.0 + github.com/mattn/go-colorable v0.1.2 + github.com/mattn/go-isatty v0.0.12 + github.com/mattn/go-runewidth v0.0.7 +) + +go 1.12 diff --git a/vendor/github.com/cheggaaa/pb/v3/go.sum b/vendor/github.com/cheggaaa/pb/v3/go.sum new file mode 100644 index 000000000..71cb18331 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/go.sum @@ -0,0 +1,21 @@ +github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= +github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod 
h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 h1:ZBzSG/7F4eNKz2L3GE9o300RX0Az1Bw5HF7PDraD+qU= +golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/cheggaaa/pb/v3/io.go b/vendor/github.com/cheggaaa/pb/v3/io.go new file mode 100644 index 000000000..6ad5abc24 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/io.go @@ -0,0 +1,49 @@ +package pb + +import ( + "io" +) + +// Reader it's a wrapper for given reader, but with progress handle +type Reader struct { + io.Reader + bar *ProgressBar +} + +// Read reads bytes from wrapped reader and add amount of bytes to progress bar +func (r *Reader) Read(p []byte) (n int, err error) { + n, err = r.Reader.Read(p) + r.bar.Add(n) + return +} + +// Close the wrapped reader when it implements io.Closer +func (r *Reader) Close() (err error) { + r.bar.Finish() + if closer, ok := r.Reader.(io.Closer); ok { + return closer.Close() + } + return +} + +// Writer it's a wrapper for given writer, but with progress handle +type Writer struct { + io.Writer + bar *ProgressBar +} + +// Write writes bytes to wrapped writer and add amount of bytes to progress bar +func (r *Writer) Write(p []byte) (n int, err error) { + n, err = r.Writer.Write(p) + r.bar.Add(n) + return +} + +// Close the wrapped reader when it implements io.Closer +func (r *Writer) Close() (err error) { + r.bar.Finish() + if closer, ok := r.Writer.(io.Closer); ok { + return closer.Close() + } + return +} diff --git a/vendor/github.com/cheggaaa/pb/v3/pb.go b/vendor/github.com/cheggaaa/pb/v3/pb.go new file mode 100644 index 000000000..17f3750be --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/pb.go @@ -0,0 +1,566 @@ +package pb + +import ( + "bytes" + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/template" + "time" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" + + "github.com/cheggaaa/pb/v3/termutil" +) + +// Version of ProgressBar library +const Version = "3.0.5" + +type key int + +const ( + // Bytes means we're working with byte sizes. 
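The proxy `Reader` defined in io.go above is normally obtained through `(*ProgressBar).NewProxyReader`, which also switches the bar into byte mode. A minimal sketch of copying a file with progress reporting (the file name is purely illustrative):

```go
package main

import (
	"io"
	"io/ioutil"
	"os"

	"github.com/cheggaaa/pb/v3"
)

func main() {
	f, err := os.Open("input.bin") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	info, err := f.Stat()
	if err != nil {
		panic(err)
	}

	bar := pb.New64(info.Size()).Start()
	reader := bar.NewProxyReader(f) // wraps f; every Read advances the bar

	if _, err := io.Copy(ioutil.Discard, reader); err != nil {
		panic(err)
	}
	bar.Finish()
}
```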
Numbers will print as Kb, Mb, etc + // bar.Set(pb.Bytes, true) + Bytes key = 1 << iota + + // Use SI bytes prefix names (kB, MB, etc) instead of IEC prefix names (KiB, MiB, etc) + SIBytesPrefix + + // Terminal means we're will print to terminal and can use ascii sequences + // Also we're will try to use terminal width + Terminal + + // Static means progress bar will not update automaticly + Static + + // ReturnSymbol - by default in terminal mode it's '\r' + ReturnSymbol + + // Color by default is true when output is tty, but you can set to false for disabling colors + Color +) + +const ( + defaultBarWidth = 100 + defaultRefreshRate = time.Millisecond * 200 +) + +// New creates new ProgressBar object +func New(total int) *ProgressBar { + return New64(int64(total)) +} + +// New64 creates new ProgressBar object using int64 as total +func New64(total int64) *ProgressBar { + pb := new(ProgressBar) + return pb.SetTotal(total) +} + +// StartNew starts new ProgressBar with Default template +func StartNew(total int) *ProgressBar { + return New(total).Start() +} + +// Start64 starts new ProgressBar with Default template. Using int64 as total. +func Start64(total int64) *ProgressBar { + return New64(total).Start() +} + +var ( + terminalWidth = termutil.TerminalWidth + isTerminal = isatty.IsTerminal + isCygwinTerminal = isatty.IsCygwinTerminal +) + +// ProgressBar is the main object of bar +type ProgressBar struct { + current, total int64 + width int + maxWidth int + mu sync.RWMutex + rm sync.Mutex + vars map[interface{}]interface{} + elements map[string]Element + output io.Writer + coutput io.Writer + nocoutput io.Writer + startTime time.Time + refreshRate time.Duration + tmpl *template.Template + state *State + buf *bytes.Buffer + ticker *time.Ticker + finish chan struct{} + finished bool + configured bool + err error +} + +func (pb *ProgressBar) configure() { + if pb.configured { + return + } + pb.configured = true + + if pb.vars == nil { + pb.vars = make(map[interface{}]interface{}) + } + if pb.output == nil { + pb.output = os.Stderr + } + + if pb.tmpl == nil { + pb.tmpl, pb.err = getTemplate(string(Default)) + if pb.err != nil { + return + } + } + if pb.vars[Terminal] == nil { + if f, ok := pb.output.(*os.File); ok { + if isTerminal(f.Fd()) || isCygwinTerminal(f.Fd()) { + pb.vars[Terminal] = true + } + } + } + if pb.vars[ReturnSymbol] == nil { + if tm, ok := pb.vars[Terminal].(bool); ok && tm { + pb.vars[ReturnSymbol] = "\r" + } + } + if pb.vars[Color] == nil { + if tm, ok := pb.vars[Terminal].(bool); ok && tm { + pb.vars[Color] = true + } + } + if pb.refreshRate == 0 { + pb.refreshRate = defaultRefreshRate + } + if f, ok := pb.output.(*os.File); ok { + pb.coutput = colorable.NewColorable(f) + } else { + pb.coutput = pb.output + } + pb.nocoutput = colorable.NewNonColorable(pb.output) +} + +// Start starts the bar +func (pb *ProgressBar) Start() *ProgressBar { + pb.mu.Lock() + defer pb.mu.Unlock() + if pb.finish != nil { + return pb + } + pb.configure() + pb.finished = false + pb.state = nil + pb.startTime = time.Now() + if st, ok := pb.vars[Static].(bool); ok && st { + return pb + } + pb.finish = make(chan struct{}) + pb.ticker = time.NewTicker(pb.refreshRate) + go pb.writer(pb.finish) + return pb +} + +func (pb *ProgressBar) writer(finish chan struct{}) { + for { + select { + case <-pb.ticker.C: + pb.write(false) + case <-finish: + pb.ticker.Stop() + pb.write(true) + finish <- struct{}{} + return + } + } +} + +// Write performs write to the output +func (pb *ProgressBar) Write() *ProgressBar { 
+ pb.mu.RLock() + finished := pb.finished + pb.mu.RUnlock() + pb.write(finished) + return pb +} + +func (pb *ProgressBar) write(finish bool) { + result, width := pb.render() + if pb.Err() != nil { + return + } + if pb.GetBool(Terminal) { + if r := (width - CellCount(result)); r > 0 { + result += strings.Repeat(" ", r) + } + } + if ret, ok := pb.Get(ReturnSymbol).(string); ok { + result = ret + result + if finish && ret == "\r" { + result += "\n" + } + } + if pb.GetBool(Color) { + pb.coutput.Write([]byte(result)) + } else { + pb.nocoutput.Write([]byte(result)) + } +} + +// Total return current total bar value +func (pb *ProgressBar) Total() int64 { + return atomic.LoadInt64(&pb.total) +} + +// SetTotal sets the total bar value +func (pb *ProgressBar) SetTotal(value int64) *ProgressBar { + atomic.StoreInt64(&pb.total, value) + return pb +} + +// SetCurrent sets the current bar value +func (pb *ProgressBar) SetCurrent(value int64) *ProgressBar { + atomic.StoreInt64(&pb.current, value) + return pb +} + +// Current return current bar value +func (pb *ProgressBar) Current() int64 { + return atomic.LoadInt64(&pb.current) +} + +// Add adding given int64 value to bar value +func (pb *ProgressBar) Add64(value int64) *ProgressBar { + atomic.AddInt64(&pb.current, value) + return pb +} + +// Add adding given int value to bar value +func (pb *ProgressBar) Add(value int) *ProgressBar { + return pb.Add64(int64(value)) +} + +// Increment atomically increments the progress +func (pb *ProgressBar) Increment() *ProgressBar { + return pb.Add64(1) +} + +// Set sets any value by any key +func (pb *ProgressBar) Set(key, value interface{}) *ProgressBar { + pb.mu.Lock() + defer pb.mu.Unlock() + if pb.vars == nil { + pb.vars = make(map[interface{}]interface{}) + } + pb.vars[key] = value + return pb +} + +// Get return value by key +func (pb *ProgressBar) Get(key interface{}) interface{} { + pb.mu.RLock() + defer pb.mu.RUnlock() + if pb.vars == nil { + return nil + } + return pb.vars[key] +} + +// GetBool return value by key and try to convert there to boolean +// If value doesn't set or not boolean - return false +func (pb *ProgressBar) GetBool(key interface{}) bool { + if v, ok := pb.Get(key).(bool); ok { + return v + } + return false +} + +// SetWidth sets the bar width +// When given value <= 0 would be using the terminal width (if possible) or default value. +func (pb *ProgressBar) SetWidth(width int) *ProgressBar { + pb.mu.Lock() + pb.width = width + pb.mu.Unlock() + return pb +} + +// SetMaxWidth sets the bar maximum width +// When given value <= 0 would be using the terminal width (if possible) or default value. +func (pb *ProgressBar) SetMaxWidth(maxWidth int) *ProgressBar { + pb.mu.Lock() + pb.maxWidth = maxWidth + pb.mu.Unlock() + return pb +} + +// Width return the bar width +// It's current terminal width or settled over 'SetWidth' value. +func (pb *ProgressBar) Width() (width int) { + defer func() { + if r := recover(); r != nil { + width = defaultBarWidth + } + }() + pb.mu.RLock() + width = pb.width + maxWidth := pb.maxWidth + pb.mu.RUnlock() + if width <= 0 { + var err error + if width, err = terminalWidth(); err != nil { + return defaultBarWidth + } + } + if maxWidth > 0 && width > maxWidth { + width = maxWidth + } + return +} + +func (pb *ProgressBar) SetRefreshRate(dur time.Duration) *ProgressBar { + pb.mu.Lock() + if dur > 0 { + pb.refreshRate = dur + } + pb.mu.Unlock() + return pb +} + +// SetWriter sets the io.Writer. 
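Putting the pieces together, a minimal sketch of driving a bar with a custom template string built from the elements defined earlier (the template layout and the sleep are only illustrative; the built-in presets in preset.go below serve the same purpose):

```go
package main

import (
	"time"

	"github.com/cheggaaa/pb/v3"
)

func main() {
	total := 100
	// Elements registered in element.go: counters, bar, percent, etime.
	tmpl := `{{counters . }} {{bar . "[" "=" ">" " " "]"}} {{percent . }} {{etime . }}`

	bar := pb.New(total).SetTemplateString(tmpl).Start()
	for i := 0; i < total; i++ {
		bar.Increment()
		time.Sleep(10 * time.Millisecond)
	}
	bar.Finish()
}
```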
Bar will write in this writer +// By default this is os.Stderr +func (pb *ProgressBar) SetWriter(w io.Writer) *ProgressBar { + pb.mu.Lock() + pb.output = w + pb.configured = false + pb.configure() + pb.mu.Unlock() + return pb +} + +// StartTime return the time when bar started +func (pb *ProgressBar) StartTime() time.Time { + pb.mu.RLock() + defer pb.mu.RUnlock() + return pb.startTime +} + +// Format convert int64 to string according to the current settings +func (pb *ProgressBar) Format(v int64) string { + if pb.GetBool(Bytes) { + return formatBytes(v, pb.GetBool(SIBytesPrefix)) + } + return strconv.FormatInt(v, 10) +} + +// Finish stops the bar +func (pb *ProgressBar) Finish() *ProgressBar { + pb.mu.Lock() + if pb.finished { + pb.mu.Unlock() + return pb + } + finishChan := pb.finish + pb.finished = true + pb.mu.Unlock() + if finishChan != nil { + finishChan <- struct{}{} + <-finishChan + pb.mu.Lock() + pb.finish = nil + pb.mu.Unlock() + } + return pb +} + +// IsStarted indicates progress bar state +func (pb *ProgressBar) IsStarted() bool { + pb.mu.RLock() + defer pb.mu.RUnlock() + return pb.finish != nil +} + +// SetTemplateString sets ProgressBar tempate string and parse it +func (pb *ProgressBar) SetTemplateString(tmpl string) *ProgressBar { + pb.mu.Lock() + defer pb.mu.Unlock() + pb.tmpl, pb.err = getTemplate(tmpl) + return pb +} + +// SetTemplateString sets ProgressBarTempate and parse it +func (pb *ProgressBar) SetTemplate(tmpl ProgressBarTemplate) *ProgressBar { + return pb.SetTemplateString(string(tmpl)) +} + +// NewProxyReader creates a wrapper for given reader, but with progress handle +// Takes io.Reader or io.ReadCloser +// Also, it automatically switches progress bar to handle units as bytes +func (pb *ProgressBar) NewProxyReader(r io.Reader) *Reader { + pb.Set(Bytes, true) + return &Reader{r, pb} +} + +// NewProxyWriter creates a wrapper for given writer, but with progress handle +// Takes io.Writer or io.WriteCloser +// Also, it automatically switches progress bar to handle units as bytes +func (pb *ProgressBar) NewProxyWriter(r io.Writer) *Writer { + pb.Set(Bytes, true) + return &Writer{r, pb} +} + +func (pb *ProgressBar) render() (result string, width int) { + defer func() { + if r := recover(); r != nil { + pb.SetErr(fmt.Errorf("render panic: %v", r)) + } + }() + pb.rm.Lock() + defer pb.rm.Unlock() + pb.mu.Lock() + pb.configure() + if pb.state == nil { + pb.state = &State{ProgressBar: pb} + pb.buf = bytes.NewBuffer(nil) + } + if pb.startTime.IsZero() { + pb.startTime = time.Now() + } + pb.state.id++ + pb.state.finished = pb.finished + pb.state.time = time.Now() + pb.mu.Unlock() + + pb.state.width = pb.Width() + width = pb.state.width + pb.state.total = pb.Total() + pb.state.current = pb.Current() + pb.buf.Reset() + + if e := pb.tmpl.Execute(pb.buf, pb.state); e != nil { + pb.SetErr(e) + return "", 0 + } + + result = pb.buf.String() + + aec := len(pb.state.recalc) + if aec == 0 { + // no adaptive elements + return + } + + staticWidth := CellCount(result) - (aec * adElPlaceholderLen) + + if pb.state.Width()-staticWidth <= 0 { + result = strings.Replace(result, adElPlaceholder, "", -1) + result = StripString(result, pb.state.Width()) + } else { + pb.state.adaptiveElWidth = (width - staticWidth) / aec + for _, el := range pb.state.recalc { + result = strings.Replace(result, adElPlaceholder, el.ProgressElement(pb.state), 1) + } + } + pb.state.recalc = pb.state.recalc[:0] + return +} + +// SetErr sets error to the ProgressBar +// Error will be available over Err() +func (pb 
*ProgressBar) SetErr(err error) *ProgressBar { + pb.mu.Lock() + pb.err = err + pb.mu.Unlock() + return pb +} + +// Err return possible error +// When all ok - will be nil +// May contain template.Execute errors +func (pb *ProgressBar) Err() error { + pb.mu.RLock() + defer pb.mu.RUnlock() + return pb.err +} + +// String return currrent string representation of ProgressBar +func (pb *ProgressBar) String() string { + res, _ := pb.render() + return res +} + +// ProgressElement implements Element interface +func (pb *ProgressBar) ProgressElement(s *State, args ...string) string { + if s.IsAdaptiveWidth() { + pb.SetWidth(s.AdaptiveElWidth()) + } + return pb.String() +} + +// State represents the current state of bar +// Need for bar elements +type State struct { + *ProgressBar + + id uint64 + total, current int64 + width, adaptiveElWidth int + finished, adaptive bool + time time.Time + + recalc []Element +} + +// Id it's the current state identifier +// - incremental +// - starts with 1 +// - resets after finish/start +func (s *State) Id() uint64 { + return s.id +} + +// Total it's bar int64 total +func (s *State) Total() int64 { + return s.total +} + +// Value it's current value +func (s *State) Value() int64 { + return s.current +} + +// Width of bar +func (s *State) Width() int { + return s.width +} + +// AdaptiveElWidth - adaptive elements must return string with given cell count (when AdaptiveElWidth > 0) +func (s *State) AdaptiveElWidth() int { + return s.adaptiveElWidth +} + +// IsAdaptiveWidth returns true when element must be shown as adaptive +func (s *State) IsAdaptiveWidth() bool { + return s.adaptive +} + +// IsFinished return true when bar is finished +func (s *State) IsFinished() bool { + return s.finished +} + +// IsFirst return true only in first render +func (s *State) IsFirst() bool { + return s.id == 1 +} + +// Time when state was created +func (s *State) Time() time.Time { + return s.time +} diff --git a/vendor/github.com/cheggaaa/pb/v3/preset.go b/vendor/github.com/cheggaaa/pb/v3/preset.go new file mode 100644 index 000000000..f5e2fff57 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/preset.go @@ -0,0 +1,15 @@ +package pb + +var ( + // Full - preset with all default available elements + // Example: 'Prefix 20/100 [-->______] 20% 1 p/s ETA 1m Suffix' + Full ProgressBarTemplate = `{{string . "prefix"}}{{counters . }} {{bar . }} {{percent . }} {{speed . }} {{rtime . "ETA %s"}}{{string . "suffix"}}` + + // Default - preset like Full but without elapsed time + // Example: 'Prefix 20/100 [-->______] 20% 1 p/s ETA 1m Suffix' + Default ProgressBarTemplate = `{{string . "prefix"}}{{counters . }} {{bar . }} {{percent . }} {{speed . }}{{string . "suffix"}}` + + // Simple - preset without speed and any timers. Only counters, bar and percents + // Example: 'Prefix 20/100 [-->______] 20% Suffix' + Simple ProgressBarTemplate = `{{string . "prefix"}}{{counters . }} {{bar . }} {{percent . }}{{string . 
"suffix"}}` +) diff --git a/vendor/github.com/cheggaaa/pb/v3/speed.go b/vendor/github.com/cheggaaa/pb/v3/speed.go new file mode 100644 index 000000000..17a6b1bfa --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/speed.go @@ -0,0 +1,83 @@ +package pb + +import ( + "fmt" + "math" + "time" + + "github.com/VividCortex/ewma" +) + +var speedAddLimit = time.Second / 2 + +type speed struct { + ewma ewma.MovingAverage + lastStateId uint64 + prevValue, startValue int64 + prevTime, startTime time.Time +} + +func (s *speed) value(state *State) float64 { + if s.ewma == nil { + s.ewma = ewma.NewMovingAverage() + } + if state.IsFirst() || state.Id() < s.lastStateId { + s.reset(state) + return 0 + } + if state.Id() == s.lastStateId { + return s.ewma.Value() + } + if state.IsFinished() { + return s.absValue(state) + } + dur := state.Time().Sub(s.prevTime) + if dur < speedAddLimit { + return s.ewma.Value() + } + diff := math.Abs(float64(state.Value() - s.prevValue)) + lastSpeed := diff / dur.Seconds() + s.prevTime = state.Time() + s.prevValue = state.Value() + s.lastStateId = state.Id() + s.ewma.Add(lastSpeed) + return s.ewma.Value() +} + +func (s *speed) reset(state *State) { + s.lastStateId = state.Id() + s.startTime = state.Time() + s.prevTime = state.Time() + s.startValue = state.Value() + s.prevValue = state.Value() + s.ewma = ewma.NewMovingAverage() +} + +func (s *speed) absValue(state *State) float64 { + if dur := state.Time().Sub(s.startTime); dur > 0 { + return float64(state.Value()) / dur.Seconds() + } + return 0 +} + +func getSpeedObj(state *State) (s *speed) { + if sObj, ok := state.Get(speedObj).(*speed); ok { + return sObj + } + s = new(speed) + state.Set(speedObj, s) + return +} + +// ElementSpeed calculates current speed by EWMA +// Optionally can take one or two string arguments. +// First string will be used as value for format speed, default is "%s p/s". +// Second string will be used when speed not available, default is "? p/s" +// In template use as follows: {{speed .}} or {{speed . "%s per second"}} or {{speed . "%s ps" "..."} +var ElementSpeed ElementFunc = func(state *State, args ...string) string { + sp := getSpeedObj(state).value(state) + if sp == 0 { + return argsHelper(args).getNotEmptyOr(1, "? 
p/s") + } + return fmt.Sprintf(argsHelper(args).getNotEmptyOr(0, "%s p/s"), state.Format(int64(round(sp)))) +} diff --git a/vendor/github.com/cheggaaa/pb/v3/template.go b/vendor/github.com/cheggaaa/pb/v3/template.go new file mode 100644 index 000000000..ecfc27112 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/template.go @@ -0,0 +1,88 @@ +package pb + +import ( + "math/rand" + "sync" + "text/template" + + "github.com/fatih/color" +) + +// ProgressBarTemplate that template string +type ProgressBarTemplate string + +// New creates new bar from template +func (pbt ProgressBarTemplate) New(total int) *ProgressBar { + return New(total).SetTemplate(pbt) +} + +// Start64 create and start new bar with given int64 total value +func (pbt ProgressBarTemplate) Start64(total int64) *ProgressBar { + return New64(total).SetTemplate(pbt).Start() +} + +// Start create and start new bar with given int total value +func (pbt ProgressBarTemplate) Start(total int) *ProgressBar { + return pbt.Start64(int64(total)) +} + +var templateCacheMu sync.Mutex +var templateCache = make(map[string]*template.Template) + +var defaultTemplateFuncs = template.FuncMap{ + // colors + "black": color.New(color.FgBlack).SprintFunc(), + "red": color.New(color.FgRed).SprintFunc(), + "green": color.New(color.FgGreen).SprintFunc(), + "yellow": color.New(color.FgYellow).SprintFunc(), + "blue": color.New(color.FgBlue).SprintFunc(), + "magenta": color.New(color.FgMagenta).SprintFunc(), + "cyan": color.New(color.FgCyan).SprintFunc(), + "white": color.New(color.FgWhite).SprintFunc(), + "resetcolor": color.New(color.Reset).SprintFunc(), + "rndcolor": rndcolor, + "rnd": rnd, +} + +func getTemplate(tmpl string) (t *template.Template, err error) { + templateCacheMu.Lock() + defer templateCacheMu.Unlock() + t = templateCache[tmpl] + if t != nil { + // found in cache + return + } + t = template.New("") + fillTemplateFuncs(t) + _, err = t.Parse(tmpl) + if err != nil { + t = nil + return + } + templateCache[tmpl] = t + return +} + +func fillTemplateFuncs(t *template.Template) { + t.Funcs(defaultTemplateFuncs) + emf := make(template.FuncMap) + elementsM.Lock() + for k, v := range elements { + emf[k] = v + } + elementsM.Unlock() + t.Funcs(emf) + return +} + +func rndcolor(s string) string { + c := rand.Intn(int(color.FgWhite-color.FgBlack)) + int(color.FgBlack) + return color.New(color.Attribute(c)).Sprint(s) +} + +func rnd(args ...string) string { + if len(args) == 0 { + return "" + } + return args[rand.Intn(len(args))] +} diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term.go new file mode 100644 index 000000000..02b52797e --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term.go @@ -0,0 +1,56 @@ +package termutil + +import ( + "errors" + "os" + "os/signal" + "sync" +) + +var echoLocked bool +var echoLockMutex sync.Mutex +var errLocked = errors.New("terminal locked") + +// RawModeOn switches terminal to raw mode +func RawModeOn() (quit chan struct{}, err error) { + echoLockMutex.Lock() + defer echoLockMutex.Unlock() + if echoLocked { + err = errLocked + return + } + if err = lockEcho(); err != nil { + return + } + echoLocked = true + quit = make(chan struct{}, 1) + go catchTerminate(quit) + return +} + +// RawModeOff restore previous terminal state +func RawModeOff() (err error) { + echoLockMutex.Lock() + defer echoLockMutex.Unlock() + if !echoLocked { + return + } + if err = unlockEcho(); err != nil { + return + } + echoLocked = false + return +} + +// listen exit 
signals and restore terminal state +func catchTerminate(quit chan struct{}) { + sig := make(chan os.Signal, 1) + signal.Notify(sig, unlockSignals...) + defer signal.Stop(sig) + select { + case <-quit: + RawModeOff() + case <-sig: + RawModeOff() + } +} diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term_appengine.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term_appengine.go new file mode 100644 index 000000000..4b7b20e6b --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term_appengine.go @@ -0,0 +1,11 @@ +// +build appengine + +package termutil + +import "errors" + +// terminalWidth returns width of the terminal, which is not supported +// and should always failed on appengine classic which is a sandboxed PaaS. +func TerminalWidth() (int, error) { + return 0, errors.New("Not supported") +} diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term_bsd.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term_bsd.go new file mode 100644 index 000000000..272659a12 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term_bsd.go @@ -0,0 +1,9 @@ +// +build darwin freebsd netbsd openbsd dragonfly +// +build !appengine + +package termutil + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA +const ioctlWriteTermios = syscall.TIOCSETA diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term_linux.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term_linux.go new file mode 100644 index 000000000..2f59e53e1 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term_linux.go @@ -0,0 +1,7 @@ +// +build linux +// +build !appengine + +package termutil + +const ioctlReadTermios = 0x5401 // syscall.TCGETS +const ioctlWriteTermios = 0x5402 // syscall.TCSETS diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term_nix.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term_nix.go new file mode 100644 index 000000000..14277e71f --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term_nix.go @@ -0,0 +1,8 @@ +// +build linux darwin freebsd netbsd openbsd dragonfly +// +build !appengine + +package termutil + +import "syscall" + +const sysIoctl = syscall.SYS_IOCTL diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term_plan9.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term_plan9.go new file mode 100644 index 000000000..f3934c6ec --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term_plan9.go @@ -0,0 +1,50 @@ +package termutil + +import ( + "errors" + "os" + "syscall" +) + +var ( + consctl *os.File + + // Plan 9 doesn't have syscall.SIGQUIT + unlockSignals = []os.Signal{ + os.Interrupt, syscall.SIGTERM, syscall.SIGKILL, + } +) + +// TerminalWidth returns width of the terminal. 
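All of the platform-specific termutil files expose the same `TerminalWidth` entry point. A caller-side sketch (the 80-column fallback is an arbitrary choice made here, not something the package itself does):

```go
package main

import (
	"fmt"

	"github.com/cheggaaa/pb/v3/termutil"
)

func main() {
	w, err := termutil.TerminalWidth()
	if err != nil {
		// Width detection is not supported on every build (see the appengine stub above).
		w = 80
	}
	fmt.Println("terminal width:", w)
}
```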
+func TerminalWidth() (int, error) { + return 0, errors.New("Not supported") +} + +func lockEcho() error { + if consctl != nil { + return errors.New("consctl already open") + } + var err error + consctl, err = os.OpenFile("/dev/consctl", os.O_WRONLY, 0) + if err != nil { + return err + } + _, err = consctl.WriteString("rawon") + if err != nil { + consctl.Close() + consctl = nil + return err + } + return nil +} + +func unlockEcho() error { + if consctl == nil { + return nil + } + if err := consctl.Close(); err != nil { + return err + } + consctl = nil + return nil +} diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term_solaris.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term_solaris.go new file mode 100644 index 000000000..fc96c2b7f --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term_solaris.go @@ -0,0 +1,8 @@ +// +build solaris +// +build !appengine + +package termutil + +const ioctlReadTermios = 0x5401 // syscall.TCGETS +const ioctlWriteTermios = 0x5402 // syscall.TCSETS +const sysIoctl = 54 diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term_win.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term_win.go new file mode 100644 index 000000000..c867d2722 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term_win.go @@ -0,0 +1,155 @@ +// +build windows + +package termutil + +import ( + "fmt" + "os" + "os/exec" + "strconv" + "syscall" + "unsafe" +) + +var ( + tty = os.Stdin + + unlockSignals = []os.Signal{ + os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL, + } +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + + // GetConsoleScreenBufferInfo retrieves information about the + // specified console screen buffer. + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + + // GetConsoleMode retrieves the current input mode of a console's + // input buffer or the current output mode of a console screen buffer. + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx + getConsoleMode = kernel32.NewProc("GetConsoleMode") + + // SetConsoleMode sets the input mode of a console's input buffer + // or the output mode of a console screen buffer. + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + setConsoleMode = kernel32.NewProc("SetConsoleMode") + + // SetConsoleCursorPosition sets the cursor position in the + // specified console screen buffer. + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx + setConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + + mingw = isMingw() +) + +type ( + // Defines the coordinates of the upper left and lower right corners + // of a rectangle. + // See + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311(v=vs.85).aspx + smallRect struct { + Left, Top, Right, Bottom int16 + } + + // Defines the coordinates of a character cell in a console screen + // buffer. The origin of the coordinate system (0,0) is at the top, left cell + // of the buffer. + // See + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119(v=vs.85).aspx + coordinates struct { + X, Y int16 + } + + word int16 + + // Contains information about a console screen buffer. 
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx + consoleScreenBufferInfo struct { + dwSize coordinates + dwCursorPosition coordinates + wAttributes word + srWindow smallRect + dwMaximumWindowSize coordinates + } +) + +// TerminalWidth returns width of the terminal. +func TerminalWidth() (width int, err error) { + if mingw { + return termWidthTPut() + } + return termWidthCmd() +} + +func termWidthCmd() (width int, err error) { + var info consoleScreenBufferInfo + _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0) + if e != 0 { + return 0, error(e) + } + return int(info.dwSize.X) - 1, nil +} + +func isMingw() bool { + return os.Getenv("MINGW_PREFIX") != "" || os.Getenv("MSYSTEM") == "MINGW64" +} + +func termWidthTPut() (width int, err error) { + // TODO: maybe anybody knows a better way to get it on mintty... + var res []byte + cmd := exec.Command("tput", "cols") + cmd.Stdin = os.Stdin + if res, err = cmd.CombinedOutput(); err != nil { + return 0, fmt.Errorf("%s: %v", string(res), err) + } + if len(res) > 1 { + res = res[:len(res)-1] + } + return strconv.Atoi(string(res)) +} + +func getCursorPos() (pos coordinates, err error) { + var info consoleScreenBufferInfo + _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0) + if e != 0 { + return info.dwCursorPosition, error(e) + } + return info.dwCursorPosition, nil +} + +func setCursorPos(pos coordinates) error { + _, _, e := syscall.Syscall(setConsoleCursorPosition.Addr(), 2, uintptr(syscall.Stdout), uintptr(uint32(uint16(pos.Y))<<16|uint32(uint16(pos.X))), 0) + if e != 0 { + return error(e) + } + return nil +} + +var oldState word + +func lockEcho() (err error) { + if _, _, e := syscall.Syscall(getConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&oldState)), 0); e != 0 { + err = fmt.Errorf("Can't get terminal settings: %v", e) + return + } + + newState := oldState + const ENABLE_ECHO_INPUT = 0x0004 + const ENABLE_LINE_INPUT = 0x0002 + newState = newState & (^(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT)) + if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(newState), 0); e != 0 { + err = fmt.Errorf("Can't set terminal settings: %v", e) + return + } + return +} + +func unlockEcho() (err error) { + if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(oldState), 0); e != 0 { + err = fmt.Errorf("Can't set terminal settings") + } + return +} diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term_x.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term_x.go new file mode 100644 index 000000000..693775549 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term_x.go @@ -0,0 +1,76 @@ +// +build linux darwin freebsd netbsd openbsd solaris dragonfly +// +build !appengine + +package termutil + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +var ( + tty *os.File + + unlockSignals = []os.Signal{ + os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL, + } +) + +type window struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +func init() { + var err error + tty, err = os.Open("/dev/tty") + if err != nil { + tty = os.Stdin + } +} + +// TerminalWidth returns width of the terminal. 
+func TerminalWidth() (int, error) { + w := new(window) + res, _, err := syscall.Syscall(sysIoctl, + tty.Fd(), + uintptr(syscall.TIOCGWINSZ), + uintptr(unsafe.Pointer(w)), + ) + if int(res) == -1 { + return 0, err + } + return int(w.Col), nil +} + +var oldState syscall.Termios + +func lockEcho() (err error) { + fd := tty.Fd() + if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 { + err = fmt.Errorf("Can't get terminal settings: %v", e) + return + } + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); e != 0 { + err = fmt.Errorf("Can't set terminal settings: %v", e) + return + } + return +} + +func unlockEcho() (err error) { + fd := tty.Fd() + if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 { + err = fmt.Errorf("Can't set terminal settings") + } + return +} diff --git a/vendor/github.com/cheggaaa/pb/v3/util.go b/vendor/github.com/cheggaaa/pb/v3/util.go new file mode 100644 index 000000000..078123420 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/util.go @@ -0,0 +1,115 @@ +package pb + +import ( + "bytes" + "fmt" + "github.com/mattn/go-runewidth" + "math" + "regexp" + //"unicode/utf8" +) + +const ( + _KiB = 1024 + _MiB = 1048576 + _GiB = 1073741824 + _TiB = 1099511627776 + + _kB = 1e3 + _MB = 1e6 + _GB = 1e9 + _TB = 1e12 +) + +var ctrlFinder = regexp.MustCompile("\x1b\x5b[0-9]+\x6d") + +func CellCount(s string) int { + n := runewidth.StringWidth(s) + for _, sm := range ctrlFinder.FindAllString(s, -1) { + n -= runewidth.StringWidth(sm) + } + return n +} + +func StripString(s string, w int) string { + l := CellCount(s) + if l <= w { + return s + } + var buf = bytes.NewBuffer(make([]byte, 0, len(s))) + StripStringToBuffer(s, w, buf) + return buf.String() +} + +func StripStringToBuffer(s string, w int, buf *bytes.Buffer) { + var seqs = ctrlFinder.FindAllStringIndex(s, -1) +mainloop: + for i, r := range s { + for _, seq := range seqs { + if i >= seq[0] && i < seq[1] { + buf.WriteRune(r) + continue mainloop + } + } + if rw := CellCount(string(r)); rw <= w { + w -= rw + buf.WriteRune(r) + } else { + break + } + } + for w > 0 { + buf.WriteByte(' ') + w-- + } + return +} + +func round(val float64) (newVal float64) { + roundOn := 0.5 + places := 0 + var round float64 + pow := math.Pow(10, float64(places)) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + newVal = round / pow + return +} + +// Convert bytes to human readable string. 
Like a 2 MiB, 64.2 KiB, or 2 MB, 64.2 kB +// if useSIPrefix is set to true +func formatBytes(i int64, useSIPrefix bool) (result string) { + if !useSIPrefix { + switch { + case i >= _TiB: + result = fmt.Sprintf("%.02f TiB", float64(i)/_TiB) + case i >= _GiB: + result = fmt.Sprintf("%.02f GiB", float64(i)/_GiB) + case i >= _MiB: + result = fmt.Sprintf("%.02f MiB", float64(i)/_MiB) + case i >= _KiB: + result = fmt.Sprintf("%.02f KiB", float64(i)/_KiB) + default: + result = fmt.Sprintf("%d B", i) + } + } else { + switch { + case i >= _TB: + result = fmt.Sprintf("%.02f TB", float64(i)/_TB) + case i >= _GB: + result = fmt.Sprintf("%.02f GB", float64(i)/_GB) + case i >= _MB: + result = fmt.Sprintf("%.02f MB", float64(i)/_MB) + case i >= _kB: + result = fmt.Sprintf("%.02f kB", float64(i)/_kB) + default: + result = fmt.Sprintf("%d B", i) + } + } + return +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md new file mode 100644 index 000000000..1cade6cef --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Brian Goff + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go new file mode 100644 index 000000000..b48005673 --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go @@ -0,0 +1,14 @@ +package md2man + +import ( + "github.com/russross/blackfriday/v2" +) + +// Render converts a markdown document into a roff formatted document. +func Render(doc []byte) []byte { + renderer := NewRoffRenderer() + + return blackfriday.Run(doc, + []blackfriday.Option{blackfriday.WithRenderer(renderer), + blackfriday.WithExtensions(renderer.GetExtensions())}...) 
+} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go new file mode 100644 index 000000000..0668a66cf --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -0,0 +1,345 @@ +package md2man + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/russross/blackfriday/v2" +) + +// roffRenderer implements the blackfriday.Renderer interface for creating +// roff format (manpages) from markdown text +type roffRenderer struct { + extensions blackfriday.Extensions + listCounters []int + firstHeader bool + defineTerm bool + listDepth int +} + +const ( + titleHeader = ".TH " + topLevelHeader = "\n\n.SH " + secondLevelHdr = "\n.SH " + otherHeader = "\n.SS " + crTag = "\n" + emphTag = "\\fI" + emphCloseTag = "\\fP" + strongTag = "\\fB" + strongCloseTag = "\\fP" + breakTag = "\n.br\n" + paraTag = "\n.PP\n" + hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" + linkTag = "\n\\[la]" + linkCloseTag = "\\[ra]" + codespanTag = "\\fB\\fC" + codespanCloseTag = "\\fR" + codeTag = "\n.PP\n.RS\n\n.nf\n" + codeCloseTag = "\n.fi\n.RE\n" + quoteTag = "\n.PP\n.RS\n" + quoteCloseTag = "\n.RE\n" + listTag = "\n.RS\n" + listCloseTag = "\n.RE\n" + arglistTag = "\n.TP\n" + tableStart = "\n.TS\nallbox;\n" + tableEnd = ".TE\n" + tableCellStart = "T{\n" + tableCellEnd = "\nT}\n" +) + +// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents +// from markdown +func NewRoffRenderer() *roffRenderer { // nolint: golint + var extensions blackfriday.Extensions + + extensions |= blackfriday.NoIntraEmphasis + extensions |= blackfriday.Tables + extensions |= blackfriday.FencedCode + extensions |= blackfriday.SpaceHeadings + extensions |= blackfriday.Footnotes + extensions |= blackfriday.Titleblock + extensions |= blackfriday.DefinitionLists + return &roffRenderer{ + extensions: extensions, + } +} + +// GetExtensions returns the list of extensions used by this renderer implementation +func (r *roffRenderer) GetExtensions() blackfriday.Extensions { + return r.extensions +} + +// RenderHeader handles outputting the header at document start +func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { + // disable hyphenation + out(w, ".nh\n") +} + +// RenderFooter handles outputting the footer at the document end; the roff +// renderer has no footer information +func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { +} + +// RenderNode is called for each node in a markdown document; based on the node +// type the equivalent roff output is sent to the writer +func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + + var walkAction = blackfriday.GoToNext + + switch node.Type { + case blackfriday.Text: + r.handleText(w, node, entering) + case blackfriday.Softbreak: + out(w, crTag) + case blackfriday.Hardbreak: + out(w, breakTag) + case blackfriday.Emph: + if entering { + out(w, emphTag) + } else { + out(w, emphCloseTag) + } + case blackfriday.Strong: + if entering { + out(w, strongTag) + } else { + out(w, strongCloseTag) + } + case blackfriday.Link: + if !entering { + out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag) + } + case blackfriday.Image: + // ignore images + walkAction = blackfriday.SkipChildren + case blackfriday.Code: + out(w, codespanTag) + escapeSpecialChars(w, node.Literal) + out(w, codespanCloseTag) + case blackfriday.Document: + break + case blackfriday.Paragraph: + // roff .PP markers break lists + if 
r.listDepth > 0 { + return blackfriday.GoToNext + } + if entering { + out(w, paraTag) + } else { + out(w, crTag) + } + case blackfriday.BlockQuote: + if entering { + out(w, quoteTag) + } else { + out(w, quoteCloseTag) + } + case blackfriday.Heading: + r.handleHeading(w, node, entering) + case blackfriday.HorizontalRule: + out(w, hruleTag) + case blackfriday.List: + r.handleList(w, node, entering) + case blackfriday.Item: + r.handleItem(w, node, entering) + case blackfriday.CodeBlock: + out(w, codeTag) + escapeSpecialChars(w, node.Literal) + out(w, codeCloseTag) + case blackfriday.Table: + r.handleTable(w, node, entering) + case blackfriday.TableCell: + r.handleTableCell(w, node, entering) + case blackfriday.TableHead: + case blackfriday.TableBody: + case blackfriday.TableRow: + // no action as cell entries do all the nroff formatting + return blackfriday.GoToNext + default: + fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String()) + } + return walkAction +} + +func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) { + var ( + start, end string + ) + // handle special roff table cell text encapsulation + if node.Parent.Type == blackfriday.TableCell { + if len(node.Literal) > 30 { + start = tableCellStart + end = tableCellEnd + } else { + // end rows that aren't terminated by "tableCellEnd" with a cr if end of row + if node.Parent.Next == nil && !node.Parent.IsHeader { + end = crTag + } + } + } + out(w, start) + escapeSpecialChars(w, node.Literal) + out(w, end) +} + +func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) { + if entering { + switch node.Level { + case 1: + if !r.firstHeader { + out(w, titleHeader) + r.firstHeader = true + break + } + out(w, topLevelHeader) + case 2: + out(w, secondLevelHdr) + default: + out(w, otherHeader) + } + } +} + +func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) { + openTag := listTag + closeTag := listCloseTag + if node.ListFlags&blackfriday.ListTypeDefinition != 0 { + // tags for definition lists handled within Item node + openTag = "" + closeTag = "" + } + if entering { + r.listDepth++ + if node.ListFlags&blackfriday.ListTypeOrdered != 0 { + r.listCounters = append(r.listCounters, 1) + } + out(w, openTag) + } else { + if node.ListFlags&blackfriday.ListTypeOrdered != 0 { + r.listCounters = r.listCounters[:len(r.listCounters)-1] + } + out(w, closeTag) + r.listDepth-- + } +} + +func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) { + if entering { + if node.ListFlags&blackfriday.ListTypeOrdered != 0 { + out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1])) + r.listCounters[len(r.listCounters)-1]++ + } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 { + // state machine for handling terms and following definitions + // since blackfriday does not distinguish them properly, nor + // does it seperate them into separate lists as it should + if !r.defineTerm { + out(w, arglistTag) + r.defineTerm = true + } else { + r.defineTerm = false + } + } else { + out(w, ".IP \\(bu 2\n") + } + } else { + out(w, "\n") + } +} + +func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) { + if entering { + out(w, tableStart) + //call walker to count cells (and rows?) 
so format section can be produced + columns := countColumns(node) + out(w, strings.Repeat("l ", columns)+"\n") + out(w, strings.Repeat("l ", columns)+".\n") + } else { + out(w, tableEnd) + } +} + +func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) { + var ( + start, end string + ) + if node.IsHeader { + start = codespanTag + end = codespanCloseTag + } + if entering { + if node.Prev != nil && node.Prev.Type == blackfriday.TableCell { + out(w, "\t"+start) + } else { + out(w, start) + } + } else { + // need to carriage return if we are at the end of the header row + if node.IsHeader && node.Next == nil { + end = end + crTag + } + out(w, end) + } +} + +// because roff format requires knowing the column count before outputting any table +// data we need to walk a table tree and count the columns +func countColumns(node *blackfriday.Node) int { + var columns int + + node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + switch node.Type { + case blackfriday.TableRow: + if !entering { + return blackfriday.Terminate + } + case blackfriday.TableCell: + if entering { + columns++ + } + default: + } + return blackfriday.GoToNext + }) + return columns +} + +func out(w io.Writer, output string) { + io.WriteString(w, output) // nolint: errcheck +} + +func needsBackslash(c byte) bool { + for _, r := range []byte("-_&\\~") { + if c == r { + return true + } + } + return false +} + +func escapeSpecialChars(w io.Writer, text []byte) { + for i := 0; i < len(text); i++ { + // escape initial apostrophe or period + if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { + out(w, "\\&") + } + + // directly copy normal characters + org := i + + for i < len(text) && !needsBackslash(text[i]) { + i++ + } + if i > org { + w.Write(text[org:i]) // nolint: errcheck + } + + // escape a character + if i >= len(text) { + break + } + + w.Write([]byte{'\\', text[i]}) // nolint: errcheck + } +} diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md new file mode 100644 index 000000000..25fdaf639 --- /dev/null +++ b/vendor/github.com/fatih/color/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md new file mode 100644 index 000000000..42d9abc07 --- /dev/null +++ b/vendor/github.com/fatih/color/README.md @@ -0,0 +1,182 @@ +# Archived project. No maintenance. 
+ +This project is not maintained anymore and is archived. Feel free to fork and +make your own changes if needed. For more detail read my blog post: [Taking an indefinite sabbatical from my projects](https://arslan.io/2018/10/09/taking-an-indefinite-sabbatical-from-my-projects/) + +Thanks to everyone for their valuable feedback and contributions. + + +# Color [![GoDoc](https://godoc.org/github.com/fatih/color?status.svg)](https://godoc.org/github.com/fatih/color) + +Color lets you use colorized outputs in terms of [ANSI Escape +Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It +has support for Windows too! The API can be used in several ways, pick one that +suits you. + + +![Color](https://i.imgur.com/c1JI0lA.png) + + +## Install + +```bash +go get github.com/fatih/color +``` + +## Examples + +### Standard colors + +```go +// Print with default helper functions +color.Cyan("Prints text in cyan.") + +// A newline will be appended automatically +color.Blue("Prints %s in blue.", "text") + +// These are using the default foreground colors +color.Red("We have red") +color.Magenta("And many others ..") + +``` + +### Mix and reuse colors + +```go +// Create a new color object +c := color.New(color.FgCyan).Add(color.Underline) +c.Println("Prints cyan text with an underline.") + +// Or just add them to New() +d := color.New(color.FgCyan, color.Bold) +d.Printf("This prints bold cyan %s\n", "too!.") + +// Mix up foreground and background colors, create new mixes! +red := color.New(color.FgRed) + +boldRed := red.Add(color.Bold) +boldRed.Println("This will print text in bold red.") + +whiteBackground := red.Add(color.BgWhite) +whiteBackground.Println("Red text with white background.") +``` + +### Use your own output (io.Writer) + +```go +// Use your own io.Writer output +color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + +blue := color.New(color.FgBlue) +blue.Fprint(writer, "This will print text in blue.") +``` + +### Custom print functions (PrintFunc) + +```go +// Create a custom print function for convenience +red := color.New(color.FgRed).PrintfFunc() +red("Warning") +red("Error: %s", err) + +// Mix up multiple attributes +notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() +notice("Don't forget this...") +``` + +### Custom fprint functions (FprintFunc) + +```go +blue := color.New(FgBlue).FprintfFunc() +blue(myWriter, "important notice: %s", stars) + +// Mix up with multiple attributes +success := color.New(color.Bold, color.FgGreen).FprintlnFunc() +success(myWriter, "Don't forget this...") +``` + +### Insert into noncolor strings (SprintFunc) + +```go +// Create SprintXxx functions to mix strings with other non-colorized strings: +yellow := color.New(color.FgYellow).SprintFunc() +red := color.New(color.FgRed).SprintFunc() +fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) + +info := color.New(color.FgWhite, color.BgGreen).SprintFunc() +fmt.Printf("This %s rocks!\n", info("package")) + +// Use helper functions +fmt.Println("This", color.RedString("warning"), "should be not neglected.") +fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") + +// Windows supported too! 
Just don't forget to change the output to color.Output +fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) +``` + +### Plug into existing code + +```go +// Use handy standard colors +color.Set(color.FgYellow) + +fmt.Println("Existing text will now be in yellow") +fmt.Printf("This one %s\n", "too") + +color.Unset() // Don't forget to unset + +// You can mix up parameters +color.Set(color.FgMagenta, color.Bold) +defer color.Unset() // Use it in your function + +fmt.Println("All text will now be bold magenta.") +``` + +### Disable/Enable color + +There might be a case where you want to explicitly disable/enable color output. the +`go-isatty` package will automatically disable color output for non-tty output streams +(for example if the output were piped directly to `less`) + +`Color` has support to disable/enable colors both globally and for single color +definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You +can easily disable the color output with: + +```go + +var flagNoColor = flag.Bool("no-color", false, "Disable color output") + +if *flagNoColor { + color.NoColor = true // disables colorized output +} +``` + +It also has support for single color definitions (local). You can +disable/enable color output on the fly: + +```go +c := color.New(color.FgCyan) +c.Println("Prints cyan text") + +c.DisableColor() +c.Println("This is printed without any color") + +c.EnableColor() +c.Println("This prints again cyan...") +``` + +## Todo + +* Save/Return previous values +* Evaluate fmt.Formatter interface + + +## Credits + + * [Fatih Arslan](https://github.com/fatih) + * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) + +## License + +The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details + diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go new file mode 100644 index 000000000..91c8e9f06 --- /dev/null +++ b/vendor/github.com/fatih/color/color.go @@ -0,0 +1,603 @@ +package color + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +var ( + // NoColor defines if the output is colorized or not. It's dynamically set to + // false or true based on the stdout's file descriptor referring to a terminal + // or not. This is a global option and affects all colors. For more control + // over each color block use the methods DisableColor() individually. + NoColor = os.Getenv("TERM") == "dumb" || + (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) + + // Output defines the standard output of the print functions. By default + // os.Stdout is used. + Output = colorable.NewColorableStdout() + + // Error defines a color supporting writer for os.Stderr. + Error = colorable.NewColorableStderr() + + // colorsCache is used to reduce the count of created Color objects and + // allows to reuse already created objects with required Attribute. + colorsCache = make(map[Attribute]*Color) + colorsCacheMu sync.Mutex // protects colorsCache +) + +// Color defines a custom color object which is defined by SGR parameters. 
+type Color struct { + params []Attribute + noColor *bool +} + +// Attribute defines a single SGR Code +type Attribute int + +const escape = "\x1b" + +// Base attributes +const ( + Reset Attribute = iota + Bold + Faint + Italic + Underline + BlinkSlow + BlinkRapid + ReverseVideo + Concealed + CrossedOut +) + +// Foreground text colors +const ( + FgBlack Attribute = iota + 30 + FgRed + FgGreen + FgYellow + FgBlue + FgMagenta + FgCyan + FgWhite +) + +// Foreground Hi-Intensity text colors +const ( + FgHiBlack Attribute = iota + 90 + FgHiRed + FgHiGreen + FgHiYellow + FgHiBlue + FgHiMagenta + FgHiCyan + FgHiWhite +) + +// Background text colors +const ( + BgBlack Attribute = iota + 40 + BgRed + BgGreen + BgYellow + BgBlue + BgMagenta + BgCyan + BgWhite +) + +// Background Hi-Intensity text colors +const ( + BgHiBlack Attribute = iota + 100 + BgHiRed + BgHiGreen + BgHiYellow + BgHiBlue + BgHiMagenta + BgHiCyan + BgHiWhite +) + +// New returns a newly created color object. +func New(value ...Attribute) *Color { + c := &Color{params: make([]Attribute, 0)} + c.Add(value...) + return c +} + +// Set sets the given parameters immediately. It will change the color of +// output with the given SGR parameters until color.Unset() is called. +func Set(p ...Attribute) *Color { + c := New(p...) + c.Set() + return c +} + +// Unset resets all escape attributes and clears the output. Usually should +// be called after Set(). +func Unset() { + if NoColor { + return + } + + fmt.Fprintf(Output, "%s[%dm", escape, Reset) +} + +// Set sets the SGR sequence. +func (c *Color) Set() *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(Output, c.format()) + return c +} + +func (c *Color) unset() { + if c.isNoColorSet() { + return + } + + Unset() +} + +func (c *Color) setWriter(w io.Writer) *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(w, c.format()) + return c +} + +func (c *Color) unsetWriter(w io.Writer) { + if c.isNoColorSet() { + return + } + + if NoColor { + return + } + + fmt.Fprintf(w, "%s[%dm", escape, Reset) +} + +// Add is used to chain SGR parameters. Use as many as parameters to combine +// and create custom color objects. Example: Add(color.FgRed, color.Underline). +func (c *Color) Add(value ...Attribute) *Color { + c.params = append(c.params, value...) + return c +} + +func (c *Color) prepend(value Attribute) { + c.params = append(c.params, 0) + copy(c.params[1:], c.params[0:]) + c.params[0] = value +} + +// Fprint formats using the default formats for its operands and writes to w. +// Spaces are added between operands when neither is a string. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprint(w, a...) +} + +// Print formats using the default formats for its operands and writes to +// standard output. Spaces are added between operands when neither is a +// string. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Print(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprint(Output, a...) +} + +// Fprintf formats according to a format specifier and writes to w. +// It returns the number of bytes written and any write error encountered. 
+// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintf(w, format, a...) +} + +// Printf formats according to a format specifier and writes to standard output. +// It returns the number of bytes written and any write error encountered. +// This is the standard fmt.Printf() method wrapped with the given color. +func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintf(Output, format, a...) +} + +// Fprintln formats using the default formats for its operands and writes to w. +// Spaces are always added between operands and a newline is appended. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintln(w, a...) +} + +// Println formats using the default formats for its operands and writes to +// standard output. Spaces are always added between operands and a newline is +// appended. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Println(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintln(Output, a...) +} + +// Sprint is just like Print, but returns a string instead of printing it. +func (c *Color) Sprint(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) +} + +// Sprintln is just like Println, but returns a string instead of printing it. +func (c *Color) Sprintln(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) +} + +// Sprintf is just like Printf, but returns a string instead of printing it. +func (c *Color) Sprintf(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) +} + +// FprintFunc returns a new function that prints the passed arguments as +// colorized with color.Fprint(). +func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprint(w, a...) + } +} + +// PrintFunc returns a new function that prints the passed arguments as +// colorized with color.Print(). +func (c *Color) PrintFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Print(a...) + } +} + +// FprintfFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintf(). +func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { + return func(w io.Writer, format string, a ...interface{}) { + c.Fprintf(w, format, a...) + } +} + +// PrintfFunc returns a new function that prints the passed arguments as +// colorized with color.Printf(). +func (c *Color) PrintfFunc() func(format string, a ...interface{}) { + return func(format string, a ...interface{}) { + c.Printf(format, a...) + } +} + +// FprintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintln(). +func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprintln(w, a...) + } +} + +// PrintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Println(). 
+func (c *Color) PrintlnFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Println(a...) + } +} + +// SprintFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprint(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output, example: +// +// put := New(FgYellow).SprintFunc() +// fmt.Fprintf(color.Output, "This is a %s", put("warning")) +func (c *Color) SprintFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) + } +} + +// SprintfFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintf(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { + return func(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) + } +} + +// SprintlnFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintln(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintlnFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) + } +} + +// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m" +// an example output might be: "1;36" -> bold cyan +func (c *Color) sequence() string { + format := make([]string, len(c.params)) + for i, v := range c.params { + format[i] = strconv.Itoa(int(v)) + } + + return strings.Join(format, ";") +} + +// wrap wraps the s string with the colors attributes. The string is ready to +// be printed. +func (c *Color) wrap(s string) string { + if c.isNoColorSet() { + return s + } + + return c.format() + s + c.unformat() +} + +func (c *Color) format() string { + return fmt.Sprintf("%s[%sm", escape, c.sequence()) +} + +func (c *Color) unformat() string { + return fmt.Sprintf("%s[%dm", escape, Reset) +} + +// DisableColor disables the color output. Useful to not change any existing +// code and still being able to output. Can be used for flags like +// "--no-color". To enable back use EnableColor() method. +func (c *Color) DisableColor() { + c.noColor = boolPtr(true) +} + +// EnableColor enables the color output. Use it in conjunction with +// DisableColor(). Otherwise this method has no side effects. +func (c *Color) EnableColor() { + c.noColor = boolPtr(false) +} + +func (c *Color) isNoColorSet() bool { + // check first if we have user setted action + if c.noColor != nil { + return *c.noColor + } + + // if not return the global option, which is disabled by default + return NoColor +} + +// Equals returns a boolean value indicating whether two colors are equal. 
+func (c *Color) Equals(c2 *Color) bool { + if len(c.params) != len(c2.params) { + return false + } + + for _, attr := range c.params { + if !c2.attrExists(attr) { + return false + } + } + + return true +} + +func (c *Color) attrExists(a Attribute) bool { + for _, attr := range c.params { + if attr == a { + return true + } + } + + return false +} + +func boolPtr(v bool) *bool { + return &v +} + +func getCachedColor(p Attribute) *Color { + colorsCacheMu.Lock() + defer colorsCacheMu.Unlock() + + c, ok := colorsCache[p] + if !ok { + c = New(p) + colorsCache[p] = c + } + + return c +} + +func colorPrint(format string, p Attribute, a ...interface{}) { + c := getCachedColor(p) + + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + + if len(a) == 0 { + c.Print(format) + } else { + c.Printf(format, a...) + } +} + +func colorString(format string, p Attribute, a ...interface{}) string { + c := getCachedColor(p) + + if len(a) == 0 { + return c.SprintFunc()(format) + } + + return c.SprintfFunc()(format, a...) +} + +// Black is a convenient helper function to print with black foreground. A +// newline is appended to format by default. +func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } + +// Red is a convenient helper function to print with red foreground. A +// newline is appended to format by default. +func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } + +// Green is a convenient helper function to print with green foreground. A +// newline is appended to format by default. +func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } + +// Yellow is a convenient helper function to print with yellow foreground. +// A newline is appended to format by default. +func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } + +// Blue is a convenient helper function to print with blue foreground. A +// newline is appended to format by default. +func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } + +// Magenta is a convenient helper function to print with magenta foreground. +// A newline is appended to format by default. +func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } + +// Cyan is a convenient helper function to print with cyan foreground. A +// newline is appended to format by default. +func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } + +// White is a convenient helper function to print with white foreground. A +// newline is appended to format by default. +func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } + +// BlackString is a convenient helper function to return a string with black +// foreground. +func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } + +// RedString is a convenient helper function to return a string with red +// foreground. +func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } + +// GreenString is a convenient helper function to return a string with green +// foreground. +func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } + +// YellowString is a convenient helper function to return a string with yellow +// foreground. +func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) 
} + +// BlueString is a convenient helper function to return a string with blue +// foreground. +func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } + +// MagentaString is a convenient helper function to return a string with magenta +// foreground. +func MagentaString(format string, a ...interface{}) string { + return colorString(format, FgMagenta, a...) +} + +// CyanString is a convenient helper function to return a string with cyan +// foreground. +func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } + +// WhiteString is a convenient helper function to return a string with white +// foreground. +func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } + +// HiBlack is a convenient helper function to print with hi-intensity black foreground. A +// newline is appended to format by default. +func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } + +// HiRed is a convenient helper function to print with hi-intensity red foreground. A +// newline is appended to format by default. +func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } + +// HiGreen is a convenient helper function to print with hi-intensity green foreground. A +// newline is appended to format by default. +func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } + +// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. +// A newline is appended to format by default. +func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } + +// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A +// newline is appended to format by default. +func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) } + +// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. +// A newline is appended to format by default. +func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } + +// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A +// newline is appended to format by default. +func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } + +// HiWhite is a convenient helper function to print with hi-intensity white foreground. A +// newline is appended to format by default. +func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } + +// HiBlackString is a convenient helper function to return a string with hi-intensity black +// foreground. +func HiBlackString(format string, a ...interface{}) string { + return colorString(format, FgHiBlack, a...) +} + +// HiRedString is a convenient helper function to return a string with hi-intensity red +// foreground. +func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } + +// HiGreenString is a convenient helper function to return a string with hi-intensity green +// foreground. +func HiGreenString(format string, a ...interface{}) string { + return colorString(format, FgHiGreen, a...) +} + +// HiYellowString is a convenient helper function to return a string with hi-intensity yellow +// foreground. +func HiYellowString(format string, a ...interface{}) string { + return colorString(format, FgHiYellow, a...) 
+} + +// HiBlueString is a convenient helper function to return a string with hi-intensity blue +// foreground. +func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) } + +// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta +// foreground. +func HiMagentaString(format string, a ...interface{}) string { + return colorString(format, FgHiMagenta, a...) +} + +// HiCyanString is a convenient helper function to return a string with hi-intensity cyan +// foreground. +func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) } + +// HiWhiteString is a convenient helper function to return a string with hi-intensity white +// foreground. +func HiWhiteString(format string, a ...interface{}) string { + return colorString(format, FgHiWhite, a...) +} diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go new file mode 100644 index 000000000..cf1e96500 --- /dev/null +++ b/vendor/github.com/fatih/color/doc.go @@ -0,0 +1,133 @@ +/* +Package color is an ANSI color package to output colorized or SGR defined +output to the standard output. The API can be used in several way, pick one +that suits you. + +Use simple and default helper functions with predefined foreground colors: + + color.Cyan("Prints text in cyan.") + + // a newline will be appended automatically + color.Blue("Prints %s in blue.", "text") + + // More default foreground colors.. + color.Red("We have red") + color.Yellow("Yellow color too!") + color.Magenta("And many others ..") + + // Hi-intensity colors + color.HiGreen("Bright green color.") + color.HiBlack("Bright black means gray..") + color.HiWhite("Shiny white color!") + +However there are times where custom color mixes are required. Below are some +examples to create custom color objects and use the print functions of each +separate color object. + + // Create a new color object + c := color.New(color.FgCyan).Add(color.Underline) + c.Println("Prints cyan text with an underline.") + + // Or just add them to New() + d := color.New(color.FgCyan, color.Bold) + d.Printf("This prints bold cyan %s\n", "too!.") + + + // Mix up foreground and background colors, create new mixes! 
+ red := color.New(color.FgRed) + + boldRed := red.Add(color.Bold) + boldRed.Println("This will print text in bold red.") + + whiteBackground := red.Add(color.BgWhite) + whiteBackground.Println("Red text with White background.") + + // Use your own io.Writer output + color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + + blue := color.New(color.FgBlue) + blue.Fprint(myWriter, "This will print text in blue.") + +You can create PrintXxx functions to simplify even more: + + // Create a custom print function for convenient + red := color.New(color.FgRed).PrintfFunc() + red("warning") + red("error: %s", err) + + // Mix up multiple attributes + notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() + notice("don't forget this...") + +You can also FprintXxx functions to pass your own io.Writer: + + blue := color.New(FgBlue).FprintfFunc() + blue(myWriter, "important notice: %s", stars) + + // Mix up with multiple attributes + success := color.New(color.Bold, color.FgGreen).FprintlnFunc() + success(myWriter, don't forget this...") + + +Or create SprintXxx functions to mix strings with other non-colorized strings: + + yellow := New(FgYellow).SprintFunc() + red := New(FgRed).SprintFunc() + + fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) + + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Printf("this %s rocks!\n", info("package")) + +Windows support is enabled by default. All Print functions work as intended. +However only for color.SprintXXX functions, user should use fmt.FprintXXX and +set the output to color.Output: + + fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) + + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) + +Using with existing code is possible. Just use the Set() method to set the +standard output to the given parameters. That way a rewrite of an existing +code is not required. + + // Use handy standard colors. + color.Set(color.FgYellow) + + fmt.Println("Existing text will be now in Yellow") + fmt.Printf("This one %s\n", "too") + + color.Unset() // don't forget to unset + + // You can mix up parameters + color.Set(color.FgMagenta, color.Bold) + defer color.Unset() // use it in your function + + fmt.Println("All text will be now bold magenta.") + +There might be a case where you want to disable color output (for example to +pipe the standard output of your app to somewhere else). `Color` has support to +disable colors both globally and for single color definition. For example +suppose you have a CLI app and a `--no-color` bool flag. You can easily disable +the color output with: + + var flagNoColor = flag.Bool("no-color", false, "Disable color output") + + if *flagNoColor { + color.NoColor = true // disables colorized output + } + +It also has support for single color definitions (local). 
You can +disable/enable color output on the fly: + + c := color.New(color.FgCyan) + c.Println("Prints cyan text") + + c.DisableColor() + c.Println("This is printed without any color") + + c.EnableColor() + c.Println("This prints again cyan...") +*/ +package color diff --git a/vendor/github.com/fatih/color/go.mod b/vendor/github.com/fatih/color/go.mod new file mode 100644 index 000000000..bc0df7545 --- /dev/null +++ b/vendor/github.com/fatih/color/go.mod @@ -0,0 +1,8 @@ +module github.com/fatih/color + +go 1.13 + +require ( + github.com/mattn/go-colorable v0.1.4 + github.com/mattn/go-isatty v0.0.11 +) diff --git a/vendor/github.com/fatih/color/go.sum b/vendor/github.com/fatih/color/go.sum new file mode 100644 index 000000000..44328a8db --- /dev/null +++ b/vendor/github.com/fatih/color/go.sum @@ -0,0 +1,8 @@ +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/go-kit/kit/LICENSE b/vendor/github.com/go-kit/kit/LICENSE new file mode 100644 index 000000000..9d83342ac --- /dev/null +++ b/vendor/github.com/go-kit/kit/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Peter Bourgon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/go-kit/kit/log/README.md b/vendor/github.com/go-kit/kit/log/README.md new file mode 100644 index 000000000..a201a3d92 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/README.md @@ -0,0 +1,151 @@ +# package log + +`package log` provides a minimal interface for structured logging in services. +It may be wrapped to encode conventions, enforce type-safety, provide leveled +logging, and so on. It can be used for both typical application log events, +and log-structured data streams. 
+ +## Structured logging + +Structured logging is, basically, conceding to the reality that logs are +_data_, and warrant some level of schematic rigor. Using a stricter, +key/value-oriented message format for our logs, containing contextual and +semantic information, makes it much easier to get insight into the +operational activity of the systems we build. Consequently, `package log` is +of the strong belief that "[the benefits of structured logging outweigh the +minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)". + +Migrating from unstructured to structured logging is probably a lot easier +than you'd expect. + +```go +// Unstructured +log.Printf("HTTP server listening on %s", addr) + +// Structured +logger.Log("transport", "HTTP", "addr", addr, "msg", "listening") +``` + +## Usage + +### Typical application logging + +```go +w := log.NewSyncWriter(os.Stderr) +logger := log.NewLogfmtLogger(w) +logger.Log("question", "what is the meaning of life?", "answer", 42) + +// Output: +// question="what is the meaning of life?" answer=42 +``` + +### Contextual Loggers + +```go +func main() { + var logger log.Logger + logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger = log.With(logger, "instance_id", 123) + + logger.Log("msg", "starting") + NewWorker(log.With(logger, "component", "worker")).Run() + NewSlacker(log.With(logger, "component", "slacker")).Run() +} + +// Output: +// instance_id=123 msg=starting +// instance_id=123 component=worker msg=running +// instance_id=123 component=slacker msg=running +``` + +### Interact with stdlib logger + +Redirect stdlib logger to Go kit logger. + +```go +import ( + "os" + stdlog "log" + kitlog "github.com/go-kit/kit/log" +) + +func main() { + logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout)) + stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) + stdlog.Print("I sure like pie") +} + +// Output: +// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"} +``` + +Or, if, for legacy reasons, you need to pipe all of your logging through the +stdlib log package, you can redirect Go kit logger to the stdlib logger. + +```go +logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{}) +logger.Log("legacy", true, "msg", "at least it's something") + +// Output: +// 2016/01/01 12:34:56 legacy=true msg="at least it's something" +``` + +### Timestamps and callers + +```go +var logger log.Logger +logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) +logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) + +logger.Log("msg", "hello") + +// Output: +// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello +``` + +## Levels + +Log levels are supported via the [level package](https://godoc.org/github.com/go-kit/kit/log/level). + +## Supported output formats + +- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write)) +- JSON + +## Enhancements + +`package log` is centered on the one-method Logger interface. + +```go +type Logger interface { + Log(keyvals ...interface{}) error +} +``` + +This interface, and its supporting code like is the product of much iteration +and evaluation. For more details on the evolution of the Logger interface, +see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1), +a talk by [Chris Hines](https://github.com/ChrisHines). 
+Also, please see +[#63](https://github.com/go-kit/kit/issues/63), +[#76](https://github.com/go-kit/kit/pull/76), +[#131](https://github.com/go-kit/kit/issues/131), +[#157](https://github.com/go-kit/kit/pull/157), +[#164](https://github.com/go-kit/kit/issues/164), and +[#252](https://github.com/go-kit/kit/pull/252) +to review historical conversations about package log and the Logger interface. + +Value-add packages and suggestions, +like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/level), +are of course welcome. Good proposals should + +- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/kit/log#With), +- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped contextual loggers, and +- Be friendly to packages that accept only an unadorned log.Logger. + +## Benchmarks & comparisons + +There are a few Go logging benchmarks and comparisons that include Go kit's package log. + +- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log +- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log diff --git a/vendor/github.com/go-kit/kit/log/doc.go b/vendor/github.com/go-kit/kit/log/doc.go new file mode 100644 index 000000000..918c0af46 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/doc.go @@ -0,0 +1,116 @@ +// Package log provides a structured logger. +// +// Structured logging produces logs easily consumed later by humans or +// machines. Humans might be interested in debugging errors, or tracing +// specific requests. Machines might be interested in counting interesting +// events, or aggregating information for off-line processing. In both cases, +// it is important that the log messages are structured and actionable. +// Package log is designed to encourage both of these best practices. +// +// Basic Usage +// +// The fundamental interface is Logger. Loggers create log events from +// key/value data. The Logger interface has a single method, Log, which +// accepts a sequence of alternating key/value pairs, which this package names +// keyvals. +// +// type Logger interface { +// Log(keyvals ...interface{}) error +// } +// +// Here is an example of a function using a Logger to create log events. +// +// func RunTask(task Task, logger log.Logger) string { +// logger.Log("taskID", task.ID, "event", "starting task") +// ... +// logger.Log("taskID", task.ID, "event", "task complete") +// } +// +// The keys in the above example are "taskID" and "event". The values are +// task.ID, "starting task", and "task complete". Every key is followed +// immediately by its value. +// +// Keys are usually plain strings. Values may be any type that has a sensible +// encoding in the chosen log format. With structured logging it is a good +// idea to log simple values without formatting them. This practice allows +// the chosen logger to encode values in the most appropriate way. +// +// Contextual Loggers +// +// A contextual logger stores keyvals that it includes in all log events. +// Building appropriate contextual loggers reduces repetition and aids +// consistency in the resulting log output. With and WithPrefix add context to +// a logger. We can use With to improve the RunTask example. +// +// func RunTask(task Task, logger log.Logger) string { +// logger = log.With(logger, "taskID", task.ID) +// logger.Log("event", "starting task") +// ... +// taskHelper(task.Cmd, logger) +// ... 
+// logger.Log("event", "task complete") +// } +// +// The improved version emits the same log events as the original for the +// first and last calls to Log. Passing the contextual logger to taskHelper +// enables each log event created by taskHelper to include the task.ID even +// though taskHelper does not have access to that value. Using contextual +// loggers this way simplifies producing log output that enables tracing the +// life cycle of individual tasks. (See the Contextual example for the full +// code of the above snippet.) +// +// Dynamic Contextual Values +// +// A Valuer function stored in a contextual logger generates a new value each +// time an event is logged. The Valuer example demonstrates how this feature +// works. +// +// Valuers provide the basis for consistently logging timestamps and source +// code location. The log package defines several valuers for that purpose. +// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and +// DefaultCaller. A common logger initialization sequence that ensures all log +// entries contain a timestamp and source location looks like this: +// +// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) +// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) +// +// Concurrent Safety +// +// Applications with multiple goroutines want each log event written to the +// same logger to remain separate from other log events. Package log provides +// two simple solutions for concurrent safe logging. +// +// NewSyncWriter wraps an io.Writer and serializes each call to its Write +// method. Using a SyncWriter has the benefit that the smallest practical +// portion of the logging logic is performed within a mutex, but it requires +// the formatting Logger to make only one call to Write per log event. +// +// NewSyncLogger wraps any Logger and serializes each call to its Log method. +// Using a SyncLogger has the benefit that it guarantees each log event is +// handled atomically within the wrapped logger, but it typically serializes +// both the formatting and output logic. Use a SyncLogger if the formatting +// logger may perform multiple writes per log event. +// +// Error Handling +// +// This package relies on the practice of wrapping or decorating loggers with +// other loggers to provide composable pieces of functionality. It also means +// that Logger.Log must return an error because some +// implementations—especially those that output log data to an io.Writer—may +// encounter errors that cannot be handled locally. This in turn means that +// Loggers that wrap other loggers should return errors from the wrapped +// logger up the stack. +// +// Fortunately, the decorator pattern also provides a way to avoid the +// necessity to check for errors every time an application calls Logger.Log. +// An application required to panic whenever its Logger encounters +// an error could initialize its logger as follows. 
+// +// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) +// logger := log.LoggerFunc(func(keyvals ...interface{}) error { +// if err := fmtlogger.Log(keyvals...); err != nil { +// panic(err) +// } +// return nil +// }) +package log diff --git a/vendor/github.com/go-kit/kit/log/json_logger.go b/vendor/github.com/go-kit/kit/log/json_logger.go new file mode 100644 index 000000000..0cedbf824 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/json_logger.go @@ -0,0 +1,91 @@ +package log + +import ( + "encoding" + "encoding/json" + "fmt" + "io" + "reflect" +) + +type jsonLogger struct { + io.Writer +} + +// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a +// single JSON object. Each log event produces no more than one call to +// w.Write. The passed Writer must be safe for concurrent use by multiple +// goroutines if the returned Logger will be used concurrently. +func NewJSONLogger(w io.Writer) Logger { + return &jsonLogger{w} +} + +func (l *jsonLogger) Log(keyvals ...interface{}) error { + n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd + m := make(map[string]interface{}, n) + for i := 0; i < len(keyvals); i += 2 { + k := keyvals[i] + var v interface{} = ErrMissingValue + if i+1 < len(keyvals) { + v = keyvals[i+1] + } + merge(m, k, v) + } + enc := json.NewEncoder(l.Writer) + enc.SetEscapeHTML(false) + return enc.Encode(m) +} + +func merge(dst map[string]interface{}, k, v interface{}) { + var key string + switch x := k.(type) { + case string: + key = x + case fmt.Stringer: + key = safeString(x) + default: + key = fmt.Sprint(x) + } + + // We want json.Marshaler and encoding.TextMarshaller to take priority over + // err.Error() and v.String(). But json.Marshall (called later) does that by + // default so we force a no-op if it's one of those 2 case. + switch x := v.(type) { + case json.Marshaler: + case encoding.TextMarshaler: + case error: + v = safeError(x) + case fmt.Stringer: + v = safeString(x) + } + + dst[key] = v +} + +func safeString(str fmt.Stringer) (s string) { + defer func() { + if panicVal := recover(); panicVal != nil { + if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { + s = "NULL" + } else { + panic(panicVal) + } + } + }() + s = str.String() + return +} + +func safeError(err error) (s interface{}) { + defer func() { + if panicVal := recover(); panicVal != nil { + if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { + s = nil + } else { + panic(panicVal) + } + } + }() + s = err.Error() + return +} diff --git a/vendor/github.com/go-kit/kit/log/level/doc.go b/vendor/github.com/go-kit/kit/log/level/doc.go new file mode 100644 index 000000000..505d307b1 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/level/doc.go @@ -0,0 +1,22 @@ +// Package level implements leveled logging on top of Go kit's log package. To +// use the level package, create a logger as per normal in your func main, and +// wrap it with level.NewFilter. +// +// var logger log.Logger +// logger = log.NewLogfmtLogger(os.Stderr) +// logger = level.NewFilter(logger, level.AllowInfo()) // <-- +// logger = log.With(logger, "ts", log.DefaultTimestampUTC) +// +// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error +// helper methods to emit leveled log events. 
+// +// logger.Log("foo", "bar") // as normal, no level +// level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get()) +// if value > 100 { +// level.Error(logger).Log("value", value) +// } +// +// NewFilter allows precise control over what happens when a log event is +// emitted without a level key, or if a squelched level is used. Check the +// Option functions for details. +package level diff --git a/vendor/github.com/go-kit/kit/log/level/level.go b/vendor/github.com/go-kit/kit/log/level/level.go new file mode 100644 index 000000000..fceafc454 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/level/level.go @@ -0,0 +1,205 @@ +package level + +import "github.com/go-kit/kit/log" + +// Error returns a logger that includes a Key/ErrorValue pair. +func Error(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), ErrorValue()) +} + +// Warn returns a logger that includes a Key/WarnValue pair. +func Warn(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), WarnValue()) +} + +// Info returns a logger that includes a Key/InfoValue pair. +func Info(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), InfoValue()) +} + +// Debug returns a logger that includes a Key/DebugValue pair. +func Debug(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), DebugValue()) +} + +// NewFilter wraps next and implements level filtering. See the commentary on +// the Option functions for a detailed description of how to configure levels. +// If no options are provided, all leveled log events created with Debug, +// Info, Warn or Error helper methods are squelched and non-leveled log +// events are passed to next unmodified. +func NewFilter(next log.Logger, options ...Option) log.Logger { + l := &logger{ + next: next, + } + for _, option := range options { + option(l) + } + return l +} + +type logger struct { + next log.Logger + allowed level + squelchNoLevel bool + errNotAllowed error + errNoLevel error +} + +func (l *logger) Log(keyvals ...interface{}) error { + var hasLevel, levelAllowed bool + for i := 1; i < len(keyvals); i += 2 { + if v, ok := keyvals[i].(*levelValue); ok { + hasLevel = true + levelAllowed = l.allowed&v.level != 0 + break + } + } + if !hasLevel && l.squelchNoLevel { + return l.errNoLevel + } + if hasLevel && !levelAllowed { + return l.errNotAllowed + } + return l.next.Log(keyvals...) +} + +// Option sets a parameter for the leveled logger. +type Option func(*logger) + +// AllowAll is an alias for AllowDebug. +func AllowAll() Option { + return AllowDebug() +} + +// AllowDebug allows error, warn, info and debug level log events to pass. +func AllowDebug() Option { + return allowed(levelError | levelWarn | levelInfo | levelDebug) +} + +// AllowInfo allows error, warn and info level log events to pass. +func AllowInfo() Option { + return allowed(levelError | levelWarn | levelInfo) +} + +// AllowWarn allows error and warn level log events to pass. +func AllowWarn() Option { + return allowed(levelError | levelWarn) +} + +// AllowError allows only error level log events to pass. +func AllowError() Option { + return allowed(levelError) +} + +// AllowNone allows no leveled log events to pass. +func AllowNone() Option { + return allowed(0) +} + +func allowed(allowed level) Option { + return func(l *logger) { l.allowed = allowed } +} + +// ErrNotAllowed sets the error to return from Log when it squelches a log +// event disallowed by the configured Allow[Level] option. 
By default, +// ErrNotAllowed is nil; in this case the log event is squelched with no +// error. +func ErrNotAllowed(err error) Option { + return func(l *logger) { l.errNotAllowed = err } +} + +// SquelchNoLevel instructs Log to squelch log events with no level, so that +// they don't proceed through to the wrapped logger. If SquelchNoLevel is set +// to true and a log event is squelched in this way, the error value +// configured with ErrNoLevel is returned to the caller. +func SquelchNoLevel(squelch bool) Option { + return func(l *logger) { l.squelchNoLevel = squelch } +} + +// ErrNoLevel sets the error to return from Log when it squelches a log event +// with no level. By default, ErrNoLevel is nil; in this case the log event is +// squelched with no error. +func ErrNoLevel(err error) Option { + return func(l *logger) { l.errNoLevel = err } +} + +// NewInjector wraps next and returns a logger that adds a Key/level pair to +// the beginning of log events that don't already contain a level. In effect, +// this gives a default level to logs without a level. +func NewInjector(next log.Logger, level Value) log.Logger { + return &injector{ + next: next, + level: level, + } +} + +type injector struct { + next log.Logger + level interface{} +} + +func (l *injector) Log(keyvals ...interface{}) error { + for i := 1; i < len(keyvals); i += 2 { + if _, ok := keyvals[i].(*levelValue); ok { + return l.next.Log(keyvals...) + } + } + kvs := make([]interface{}, len(keyvals)+2) + kvs[0], kvs[1] = key, l.level + copy(kvs[2:], keyvals) + return l.next.Log(kvs...) +} + +// Value is the interface that each of the canonical level values implement. +// It contains unexported methods that prevent types from other packages from +// implementing it and guaranteeing that NewFilter can distinguish the levels +// defined in this package from all other values. +type Value interface { + String() string + levelVal() +} + +// Key returns the unique key added to log events by the loggers in this +// package. +func Key() interface{} { return key } + +// ErrorValue returns the unique value added to log events by Error. +func ErrorValue() Value { return errorValue } + +// WarnValue returns the unique value added to log events by Warn. +func WarnValue() Value { return warnValue } + +// InfoValue returns the unique value added to log events by Info. +func InfoValue() Value { return infoValue } + +// DebugValue returns the unique value added to log events by Warn. +func DebugValue() Value { return debugValue } + +var ( + // key is of type interface{} so that it allocates once during package + // initialization and avoids allocating every time the value is added to a + // []interface{} later. + key interface{} = "level" + + errorValue = &levelValue{level: levelError, name: "error"} + warnValue = &levelValue{level: levelWarn, name: "warn"} + infoValue = &levelValue{level: levelInfo, name: "info"} + debugValue = &levelValue{level: levelDebug, name: "debug"} +) + +type level byte + +const ( + levelDebug level = 1 << iota + levelInfo + levelWarn + levelError +) + +type levelValue struct { + name string + level +} + +func (v *levelValue) String() string { return v.name } +func (v *levelValue) levelVal() {} diff --git a/vendor/github.com/go-kit/kit/log/log.go b/vendor/github.com/go-kit/kit/log/log.go new file mode 100644 index 000000000..66a9e2fde --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/log.go @@ -0,0 +1,135 @@ +package log + +import "errors" + +// Logger is the fundamental interface for all log operations. 
Log creates a +// log event from keyvals, a variadic sequence of alternating keys and values. +// Implementations must be safe for concurrent use by multiple goroutines. In +// particular, any implementation of Logger that appends to keyvals or +// modifies or retains any of its elements must make a copy first. +type Logger interface { + Log(keyvals ...interface{}) error +} + +// ErrMissingValue is appended to keyvals slices with odd length to substitute +// the missing value. +var ErrMissingValue = errors.New("(MISSING)") + +// With returns a new contextual logger with keyvals prepended to those passed +// to calls to Log. If logger is also a contextual logger created by With or +// WithPrefix, keyvals is appended to the existing context. +// +// The returned Logger replaces all value elements (odd indexes) containing a +// Valuer with their generated value for each call to its Log method. +func With(logger Logger, keyvals ...interface{}) Logger { + if len(keyvals) == 0 { + return logger + } + l := newContext(logger) + kvs := append(l.keyvals, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + return &context{ + logger: l.logger, + // Limiting the capacity of the stored keyvals ensures that a new + // backing array is created if the slice must grow in Log or With. + // Using the extra capacity without copying risks a data race that + // would violate the Logger interface contract. + keyvals: kvs[:len(kvs):len(kvs)], + hasValuer: l.hasValuer || containsValuer(keyvals), + } +} + +// WithPrefix returns a new contextual logger with keyvals prepended to those +// passed to calls to Log. If logger is also a contextual logger created by +// With or WithPrefix, keyvals is prepended to the existing context. +// +// The returned Logger replaces all value elements (odd indexes) containing a +// Valuer with their generated value for each call to its Log method. +func WithPrefix(logger Logger, keyvals ...interface{}) Logger { + if len(keyvals) == 0 { + return logger + } + l := newContext(logger) + // Limiting the capacity of the stored keyvals ensures that a new + // backing array is created if the slice must grow in Log or With. + // Using the extra capacity without copying risks a data race that + // would violate the Logger interface contract. + n := len(l.keyvals) + len(keyvals) + if len(keyvals)%2 != 0 { + n++ + } + kvs := make([]interface{}, 0, n) + kvs = append(kvs, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + kvs = append(kvs, l.keyvals...) + return &context{ + logger: l.logger, + keyvals: kvs, + hasValuer: l.hasValuer || containsValuer(keyvals), + } +} + +// context is the Logger implementation returned by With and WithPrefix. It +// wraps a Logger and holds keyvals that it includes in all log events. Its +// Log method calls bindValues to generate values for each Valuer in the +// context keyvals. +// +// A context must always have the same number of stack frames between calls to +// its Log method and the eventual binding of Valuers to their value. This +// requirement comes from the functional requirement to allow a context to +// resolve application call site information for a Caller stored in the +// context. To do this we must be able to predict the number of logging +// functions on the stack when bindValues is called. +// +// Two implementation details provide the needed stack depth consistency. +// +// 1. newContext avoids introducing an additional layer when asked to +// wrap another context. +// 2. 
With and WithPrefix avoid introducing an additional layer by +// returning a newly constructed context with a merged keyvals rather +// than simply wrapping the existing context. +type context struct { + logger Logger + keyvals []interface{} + hasValuer bool +} + +func newContext(logger Logger) *context { + if c, ok := logger.(*context); ok { + return c + } + return &context{logger: logger} +} + +// Log replaces all value elements (odd indexes) containing a Valuer in the +// stored context with their generated value, appends keyvals, and passes the +// result to the wrapped Logger. +func (l *context) Log(keyvals ...interface{}) error { + kvs := append(l.keyvals, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + if l.hasValuer { + // If no keyvals were appended above then we must copy l.keyvals so + // that future log events will reevaluate the stored Valuers. + if len(keyvals) == 0 { + kvs = append([]interface{}{}, l.keyvals...) + } + bindValues(kvs[:len(l.keyvals)]) + } + return l.logger.Log(kvs...) +} + +// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If +// f is a function with the appropriate signature, LoggerFunc(f) is a Logger +// object that calls f. +type LoggerFunc func(...interface{}) error + +// Log implements Logger by calling f(keyvals...). +func (f LoggerFunc) Log(keyvals ...interface{}) error { + return f(keyvals...) +} diff --git a/vendor/github.com/go-kit/kit/log/logfmt_logger.go b/vendor/github.com/go-kit/kit/log/logfmt_logger.go new file mode 100644 index 000000000..a00305298 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/logfmt_logger.go @@ -0,0 +1,62 @@ +package log + +import ( + "bytes" + "io" + "sync" + + "github.com/go-logfmt/logfmt" +) + +type logfmtEncoder struct { + *logfmt.Encoder + buf bytes.Buffer +} + +func (l *logfmtEncoder) Reset() { + l.Encoder.Reset() + l.buf.Reset() +} + +var logfmtEncoderPool = sync.Pool{ + New: func() interface{} { + var enc logfmtEncoder + enc.Encoder = logfmt.NewEncoder(&enc.buf) + return &enc + }, +} + +type logfmtLogger struct { + w io.Writer +} + +// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in +// logfmt format. Each log event produces no more than one call to w.Write. +// The passed Writer must be safe for concurrent use by multiple goroutines if +// the returned Logger will be used concurrently. +func NewLogfmtLogger(w io.Writer) Logger { + return &logfmtLogger{w} +} + +func (l logfmtLogger) Log(keyvals ...interface{}) error { + enc := logfmtEncoderPool.Get().(*logfmtEncoder) + enc.Reset() + defer logfmtEncoderPool.Put(enc) + + if err := enc.EncodeKeyvals(keyvals...); err != nil { + return err + } + + // Add newline to the end of the buffer + if err := enc.EndRecord(); err != nil { + return err + } + + // The Logger interface requires implementations to be safe for concurrent + // use by multiple goroutines. For this implementation that means making + // only one call to l.w.Write() for each call to Log. + if _, err := l.w.Write(enc.buf.Bytes()); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/go-kit/kit/log/nop_logger.go b/vendor/github.com/go-kit/kit/log/nop_logger.go new file mode 100644 index 000000000..1047d626c --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/nop_logger.go @@ -0,0 +1,8 @@ +package log + +type nopLogger struct{} + +// NewNopLogger returns a logger that doesn't do anything. 
+func NewNopLogger() Logger { return nopLogger{} } + +func (nopLogger) Log(...interface{}) error { return nil } diff --git a/vendor/github.com/go-kit/kit/log/stdlib.go b/vendor/github.com/go-kit/kit/log/stdlib.go new file mode 100644 index 000000000..ff96b5dee --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/stdlib.go @@ -0,0 +1,116 @@ +package log + +import ( + "io" + "log" + "regexp" + "strings" +) + +// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's +// designed to be passed to a Go kit logger as the writer, for cases where +// it's necessary to redirect all Go kit log output to the stdlib logger. +// +// If you have any choice in the matter, you shouldn't use this. Prefer to +// redirect the stdlib log to the Go kit logger via NewStdlibAdapter. +type StdlibWriter struct{} + +// Write implements io.Writer. +func (w StdlibWriter) Write(p []byte) (int, error) { + log.Print(strings.TrimSpace(string(p))) + return len(p), nil +} + +// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib +// logger's SetOutput. It will extract date/timestamps, filenames, and +// messages, and place them under relevant keys. +type StdlibAdapter struct { + Logger + timestampKey string + fileKey string + messageKey string +} + +// StdlibAdapterOption sets a parameter for the StdlibAdapter. +type StdlibAdapterOption func(*StdlibAdapter) + +// TimestampKey sets the key for the timestamp field. By default, it's "ts". +func TimestampKey(key string) StdlibAdapterOption { + return func(a *StdlibAdapter) { a.timestampKey = key } +} + +// FileKey sets the key for the file and line field. By default, it's "caller". +func FileKey(key string) StdlibAdapterOption { + return func(a *StdlibAdapter) { a.fileKey = key } +} + +// MessageKey sets the key for the actual log message. By default, it's "msg". +func MessageKey(key string) StdlibAdapterOption { + return func(a *StdlibAdapter) { a.messageKey = key } +} + +// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed +// logger. It's designed to be passed to log.SetOutput. +func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer { + a := StdlibAdapter{ + Logger: logger, + timestampKey: "ts", + fileKey: "caller", + messageKey: "msg", + } + for _, option := range options { + option(&a) + } + return a +} + +func (a StdlibAdapter) Write(p []byte) (int, error) { + result := subexps(p) + keyvals := []interface{}{} + var timestamp string + if date, ok := result["date"]; ok && date != "" { + timestamp = date + } + if time, ok := result["time"]; ok && time != "" { + if timestamp != "" { + timestamp += " " + } + timestamp += time + } + if timestamp != "" { + keyvals = append(keyvals, a.timestampKey, timestamp) + } + if file, ok := result["file"]; ok && file != "" { + keyvals = append(keyvals, a.fileKey, file) + } + if msg, ok := result["msg"]; ok { + keyvals = append(keyvals, a.messageKey, msg) + } + if err := a.Logger.Log(keyvals...); err != nil { + return 0, err + } + return len(p), nil +} + +const ( + logRegexpDate = `(?P[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?` + logRegexpTime = `(?P