diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index d0291479d..2fcfe52aa 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -45,16 +45,19 @@ jobs: GOOS=freebsd go build -mod=vendor ./app/vmalert GOOS=freebsd go build -mod=vendor ./app/vmbackup GOOS=freebsd go build -mod=vendor ./app/vmrestore + GOOS=freebsd go build -mod=vendor ./app/vmctl GOOS=openbsd go build -mod=vendor ./app/victoria-metrics GOOS=openbsd go build -mod=vendor ./app/vmagent GOOS=openbsd go build -mod=vendor ./app/vmalert GOOS=openbsd go build -mod=vendor ./app/vmbackup GOOS=openbsd go build -mod=vendor ./app/vmrestore + GOOS=openbsd go build -mod=vendor ./app/vmctl GOOS=darwin go build -mod=vendor ./app/victoria-metrics GOOS=darwin go build -mod=vendor ./app/vmagent GOOS=darwin go build -mod=vendor ./app/vmalert GOOS=darwin go build -mod=vendor ./app/vmbackup GOOS=darwin go build -mod=vendor ./app/vmrestore + GOOS=darwin go build -mod=vendor ./app/vmctl - name: Publish coverage uses: codecov/codecov-action@v1.0.6 with: diff --git a/Makefile b/Makefile index 9d7ffaad2..e69e16662 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,8 @@ all: \ vmalert-prod \ vmauth-prod \ vmbackup-prod \ - vmrestore-prod + vmrestore-prod \ + vmctl-prod include app/*/Makefile include deployment/*/Makefile @@ -32,7 +33,8 @@ publish: \ publish-vmalert \ publish-vmauth \ publish-vmbackup \ - publish-vmrestore + publish-vmrestore \ + publish-vmctl package: \ package-victoria-metrics \ @@ -40,21 +42,24 @@ package: \ package-vmalert \ package-vmauth \ package-vmbackup \ - package-vmrestore + package-vmrestore \ + package-vmctl vmutils: \ vmagent \ vmalert \ vmauth \ vmbackup \ - vmrestore + vmrestore \ + vmctl vmutils-arm64: \ vmagent-arm64 \ vmalert-arm64 \ vmauth-arm64 \ vmbackup-arm64 \ - vmrestore-arm64 + vmrestore-arm64 \ + vmctl-arm64 release-snap: snapcraft @@ -97,7 +102,8 @@ release-vmutils-generic: \ vmalert-$(GOARCH)-prod \ vmauth-$(GOARCH)-prod \ vmbackup-$(GOARCH)-prod \ - vmrestore-$(GOARCH)-prod + vmrestore-$(GOARCH)-prod \ + vmctl-$(GOARCH)-prod cd bin && \ tar --transform="flags=r;s|-$(GOARCH)||" -czf vmutils-$(GOARCH)-$(PKG_TAG).tar.gz \ vmagent-$(GOARCH)-prod \ @@ -105,12 +111,14 @@ release-vmutils-generic: \ vmauth-$(GOARCH)-prod \ vmbackup-$(GOARCH)-prod \ vmrestore-$(GOARCH)-prod \ + vmctl-$(GOARCH)-prod \ && sha256sum vmutils-$(GOARCH)-$(PKG_TAG).tar.gz \ vmagent-$(GOARCH)-prod \ vmalert-$(GOARCH)-prod \ vmauth-$(GOARCH)-prod \ vmbackup-$(GOARCH)-prod \ vmrestore-$(GOARCH)-prod \ + vmctl-$(GOARCH)-prod \ | sed s/-$(GOARCH)// > vmutils-$(GOARCH)-$(PKG_TAG)_checksums.txt pprof-cpu: @@ -141,6 +149,7 @@ errcheck: install-errcheck errcheck -exclude=errcheck_excludes.txt ./app/vmauth/... errcheck -exclude=errcheck_excludes.txt ./app/vmbackup/... errcheck -exclude=errcheck_excludes.txt ./app/vmrestore/... + errcheck -exclude=errcheck_excludes.txt ./app/vmctl/... 
install-errcheck: which errcheck || go install github.com/kisielk/errcheck @@ -204,4 +213,5 @@ docs-sync: cp app/vmauth/README.md docs/vmauth.md cp app/vmbackup/README.md docs/vmbackup.md cp app/vmrestore/README.md docs/vmrestore.md + cp app/vmctl/README.md docs/vmctl.md cp README.md docs/Single-server-VictoriaMetrics.md diff --git a/README.md b/README.md index e60d13534..d2e3d8798 100644 --- a/README.md +++ b/README.md @@ -154,6 +154,7 @@ Alphabetically sorted links to case studies: * [Tuning](#tuning) * [Monitoring](#monitoring) * [Troubleshooting](#troubleshooting) +* [Data migration](#data-migration) * [Backfilling](#backfilling) * [Data updates](#data-updates) * [Replication](#replication) @@ -547,10 +548,15 @@ These handlers can be queried from Prometheus-compatible clients such as Grafana ### Prometheus querying API enhancements -Additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) VictoriaMetrics accepts relative times in `time`, `start` and `end` query args. +VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg, which can be used for enforcing additional label filters for queries. For example, +`/api/v1/query_range?extra_label=user_id=123&query=<query>` would automatically add `{user_id="123"}` label filter to the given `<query>`. This functionality can be used +for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by an auth proxy sitting +in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy. + +VictoriaMetrics accepts relative times in `time`, `start` and `end` query args in addition to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt). For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`. -By default, VictoriaMetrics returns time series for the last 5 minutes from /api/v1/series, while the Prometheus API defaults to all time. Use `start` and `end` to select a different time range. +By default, VictoriaMetrics returns time series for the last 5 minutes from `/api/v1/series`, while the Prometheus API defaults to all time. Use `start` and `end` to select a different time range. VictoriaMetrics accepts additional args for `/api/v1/labels` and `/api/v1/label/.../values` handlers. See [this feature request](https://github.com/prometheus/prometheus/issues/6178) for details: @@ -1353,6 +1359,17 @@ See the example of alerting rules for VM components [here](https://github.com/Vi * VictoriaMetrics ignores `NaN` values during data ingestion. +## Data migration + +Use [vmctl](https://victoriametrics.github.io/vmctl.html) for data migration. It supports the following data migration types: + +* From Prometheus to VictoriaMetrics +* From InfluxDB to VictoriaMetrics +* From VictoriaMetrics to VictoriaMetrics + +See [vmctl docs](https://victoriametrics.github.io/vmctl.html) for more details. + + ## Backfilling VictoriaMetrics accepts historical data in arbitrary order of time via [any supported ingestion method](#how-to-import-time-series-data). @@ -1420,7 +1437,6 @@ The collected profiles may be analyzed with [go tool pprof](https://github.com/g * [Helm charts for single-node and cluster versions of VictoriaMetrics](https://github.com/VictoriaMetrics/helm-charts). * [Kubernetes operator for VictoriaMetrics](https://github.com/VictoriaMetrics/operator). -* [vmctl tool for data migration to VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl).
* [netdata](https://github.com/netdata/netdata) can push data into VictoriaMetrics via `Prometheus remote_write API`. See [these docs](https://github.com/netdata/netdata#integrations). * [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi) can use VictoriaMetrics as time series backend. diff --git a/app/victoria-metrics/testdata/graphite/comparison-not-inf-not-nan.json b/app/victoria-metrics/testdata/graphite/comparison-not-inf-not-nan.json index edea7ab01..6dbfb07a8 100644 --- a/app/victoria-metrics/testdata/graphite/comparison-not-inf-not-nan.json +++ b/app/victoria-metrics/testdata/graphite/comparison-not-inf-not-nan.json @@ -11,6 +11,6 @@ "status":"success", "data":{"resultType":"matrix", "result":[ - {"metric":{"item":"y"},"values":[["{TIME_S-1m}","0.5"],["{TIME_S}","0.5"]]} + {"metric":{"item":"y"},"values":[["{TIME_S-1m}","0.5"]]} ]}} } diff --git a/app/victoria-metrics/testdata/graphite/not-nan-as-missing-data.json b/app/victoria-metrics/testdata/graphite/not-nan-as-missing-data.json index fb37f2fc2..694c9fabf 100644 --- a/app/victoria-metrics/testdata/graphite/not-nan-as-missing-data.json +++ b/app/victoria-metrics/testdata/graphite/not-nan-as-missing-data.json @@ -13,6 +13,6 @@ "data":{"resultType":"matrix", "result":[ {"metric":{"__name__":"not_nan_as_missing_data","item":"x"},"values":[["{TIME_S-2m}","2"]]}, - {"metric":{"__name__":"not_nan_as_missing_data","item":"y"},"values":[["{TIME_S-2m}","4"],["{TIME_S-1m}","3"],["{TIME_S}","3"]]} + {"metric":{"__name__":"not_nan_as_missing_data","item":"y"},"values":[["{TIME_S-2m}","4"],["{TIME_S-1m}","3"]]} ]}} } diff --git a/app/vmagent/README.md b/app/vmagent/README.md index 0c5eb9e58..d2d48e5ec 100644 --- a/app/vmagent/README.md +++ b/app/vmagent/README.md @@ -7,7 +7,7 @@ or any other Prometheus-compatible storage system that supports the `remote_writ vmagent -### Motivation +## Motivation While VictoriaMetrics provides an efficient solution to store and observe metrics, our users needed something fast and RAM friendly to scrape metrics from Prometheus-compatible exporters to VictoriaMetrics. @@ -15,7 +15,7 @@ Also, we found that users’ infrastructure are snowflakes - no two are alike, a to `vmagent` (like the ability to push metrics instead of pulling them). We did our best and plan to do even more. -### Features +## Features * Can be used as drop-in replacement for Prometheus for scraping targets such as [node_exporter](https://github.com/prometheus/node_exporter). See [Quick Start](#quick-start) for details. @@ -36,7 +36,7 @@ to `vmagent` (like the ability to push metrics instead of pulling them). We did * Uses lower amounts of RAM, CPU, disk IO and network bandwidth compared to Prometheus. -### Quick Start +## Quick Start Just download `vmutils-*` archive from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), unpack it and pass the following flags to `vmagent` binary in order to start scraping Prometheus targets: @@ -63,7 +63,7 @@ Then send Influx data to `http://vmagent-host:8429`. See [these docs](https://vi Pass `-help` to `vmagent` in order to see the full list of supported command-line flags with their descriptions. -### Configuration update +## Configuration update `vmagent` should be restarted in order to update config options set via command-line args. 
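To make the new `extra_label` query arg from the README hunk above concrete, here is a minimal hedged sketch (in Go; the host, label and query values are hypothetical) of how a client or auth proxy might append it:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// extra_label=user_id=123 makes VictoriaMetrics evaluate the query
	// as if every selector contained the {user_id="123"} label filter.
	v := url.Values{}
	v.Set("query", "http_requests_total")
	v.Set("extra_label", "user_id=123")
	v.Set("start", "-30m")
	fmt.Printf("http://victoriametrics:8428/api/v1/query_range?%s\n", v.Encode())
}
```

In a multi-tenant setup the auth proxy would derive the `extra_label` value from the authenticated user, so tenants cannot widen their own scope.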
@@ -79,10 +79,10 @@ Pass `-help` to `vmagent` in order to see the full list of supported command-lin There is also the `-promscrape.configCheckInterval` command-line option, which can be used for automatic reloading configs from updated `-promscrape.config` file. -### Use cases +## Use cases -#### IoT and Edge monitoring +### IoT and Edge monitoring `vmagent` can run and collect metrics in IoT and industrial networks with unreliable or scheduled connections to the remote storage. It buffers the collected data in local files until the connection to remote storage becomes available and then sends the buffered data to the remote storage. The maximum buffer size can be limited with `-remoteWrite.maxDiskUsagePerURL`. See [the corresponding Makefile rules](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmagent/Makefile) for details. -#### Drop-in replacement for Prometheus +### Drop-in replacement for Prometheus If you use Prometheus only for scraping metrics from various targets and forwarding these metrics to remote storage, then `vmagent` can replace such a Prometheus setup. Usually `vmagent` requires lower amounts of RAM, CPU and network bandwidth compared to Prometheus for such a setup. See [these docs](#how-to-collect-metrics-in-prometheus-format) for details. -#### Replication and high availability +### Replication and high availability `vmagent` replicates the collected metrics among multiple remote storage instances configured via `-remoteWrite.url` args. If a single remote storage instance is temporarily out of service, then the collected data remains available in the other remote storage instances. @@ -108,14 +108,14 @@ If a single remote storage instance temporarily is out of service, then the coll Then it sends the buffered data to the remote storage in order to prevent data gaps in the remote storage. -#### Relabeling and filtering +### Relabeling and filtering `vmagent` can add, remove or update labels on the collected data before sending it to remote storage. Additionally, it can remove unwanted samples via Prometheus-like relabeling before sending the collected data to remote storage. See [these docs](#relabeling) for details. -#### Splitting data streams among multiple systems +### Splitting data streams among multiple systems `vmagent` supports splitting the collected data between multiple destinations with the help of `-remoteWrite.urlRelabelConfig`, which is applied independently for each configured `-remoteWrite.url` destination. For instance, it is possible to replicate or split data among long-term remote storage, short-term remote storage and real-time ana Note that each destination can receive its own subset of the collected data thanks to per-destination relabeling via `-remoteWrite.urlRelabelConfig`. -#### Prometheus remote_write proxy +### Prometheus remote_write proxy `vmagent` may be used as a proxy for Prometheus data sent via Prometheus `remote_write` protocol. It can accept data via `remote_write` API at `/api/v1/write` endpoint, apply relabeling and filtering and then proxy it to other `remote_write` systems. The `vmagent` can be configured to encrypt the incoming `remote_write` requests with `-tls*` command-line flags. Additionally, Basic Auth can be enabled for the incoming `remote_write` requests with `-httpAuth.*` command-line flags.
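As a conceptual sketch of the `remote_write` proxy use case above (not vmagent's actual implementation; the upstream address is hypothetical, and the real vmagent additionally applies relabeling, filtering and disk buffering in between):

```go
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	// Hypothetical upstream remote_write endpoint.
	upstream, err := url.Parse("http://victoriametrics:8428")
	if err != nil {
		log.Fatal(err)
	}
	// Forward incoming Prometheus remote_write requests as-is; a real proxy
	// would inspect and relabel the payload before re-sending it.
	proxy := httputil.NewSingleHostReverseProxy(upstream)
	http.Handle("/api/v1/write", proxy)
	log.Fatal(http.ListenAndServe(":8429", nil))
}
```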
-#### remote_write for clustered version +### remote_write for clustered version Although `vmagent` can accept data in several supported protocols (OpenTSDB, Influx, Prometheus, Graphite) and scrape data from various targets, writes are always performed in the Prometheus remote_write protocol. Therefore, for the clustered version, the `-remoteWrite.url` command-line flag should be configured as `<schema>://<vminsert-host>:8480/insert/<accountID>/prometheus/api/v1/write` -### How to collect metrics in Prometheus format +## How to collect metrics in Prometheus format Pass the path to `prometheus.yml` to `-promscrape.config` command-line flag. `vmagent` takes into account the following sections from [Prometheus config file](https://prometheus.io/docs/prometheus/latest/configuration/configuration/): @@ -192,7 +192,7 @@ entries to 60s. Run `vmagent -help` in order to see default values for `-promscr The file pointed by `-promscrape.config` may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values. -### Adding labels to metrics +## Adding labels to metrics Labels can be added to metrics via the following mechanisms: @@ -200,7 +200,7 @@ * Via `-remoteWrite.label` command-line flag. These labels are added to all the collected metrics before sending them to `-remoteWrite.url`. -### Relabeling +## Relabeling `vmagent` supports [Prometheus relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config). Additionally, it provides the following extra actions: @@ -227,7 +227,7 @@ Read more about relabeling in the following articles: * [relabel_configs vs metric_relabel_configs](https://www.robustperception.io/relabel_configs-vs-metric_relabel_configs) -### Monitoring +## Monitoring `vmagent` exports various metrics in Prometheus exposition format at `http://vmagent-host:8429/metrics` page. It is recommended setting up regular scraping of this page either via `vmagent` itself or via Prometheus, so the exported metrics could be analyzed later. @@ -246,7 +246,7 @@ This information may be useful for debugging target relabeling. It may be useful for performing `vmagent` rolling update without scrape loss. -### Troubleshooting +## Troubleshooting * It is recommended [setting up the official Grafana dashboard](#monitoring) in order to monitor `vmagent` state. @@ -322,24 +322,24 @@ It may be useful for performing `vmagent` rolling update without scrape loss. ``` -### How to build from sources +## How to build from sources It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmagent` is located in `vmutils-*` archives there. -#### Development build +### Development build 1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13. 2. Run `make vmagent` from the root folder of the repository. It builds `vmagent` binary and puts it into the `bin` folder. -#### Production build +### Production build 1. [Install docker](https://docs.docker.com/install/). 2. Run `make vmagent-prod` from the root folder of the repository. It builds `vmagent-prod` binary and puts it into the `bin` folder. -#### Building docker images +### Building docker images Run `make package-vmagent`. It builds `victoriametrics/vmagent:<PKG_TAG>` docker image locally. `<PKG_TAG>` is an auto-generated image tag, which depends on source code in the repository. @@ -352,24 +352,24 @@ by setting it via `<ROOT_IMAGE>` environment variable.
For example, the following command builds the image on top of the `scratch` image: ``` ROOT_IMAGE=scratch make package-vmagent ``` -#### ARM build +### ARM build ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://blog.cloudflare.com/arm-takes-wing/). -#### Development ARM build +### Development ARM build 1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13. 2. Run `make vmagent-arm` or `make vmagent-arm64` from the root folder of the repository. It builds `vmagent-arm` or `vmagent-arm64` binary respectively and puts it into the `bin` folder. -#### Production ARM build +### Production ARM build 1. [Install docker](https://docs.docker.com/install/). 2. Run `make vmagent-arm-prod` or `make vmagent-arm64-prod` from the root folder of the repository. It builds `vmagent-arm-prod` or `vmagent-arm64-prod` binary respectively and puts it into the `bin` folder. -### Profiling +## Profiling `vmagent` provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs): diff --git a/app/vmagent/remotewrite/client.go b/app/vmagent/remotewrite/client.go index b5897822c..2b0332f05 100644 --- a/app/vmagent/remotewrite/client.go +++ b/app/vmagent/remotewrite/client.go @@ -8,7 +8,6 @@ import ( "io/ioutil" "net/http" "net/url" - "strconv" "strings" "sync" "time" @@ -22,7 +21,7 @@ import ( ) var ( - rateLimit = flagutil.NewArray("remoteWrite.rateLimit", "Optional rate limit in bytes per second for data sent to -remoteWrite.url. "+ + rateLimit = flagutil.NewArrayInt("remoteWrite.rateLimit", "Optional rate limit in bytes per second for data sent to -remoteWrite.url. "+ "By default the rate limit is disabled. It can be useful for limiting load on remote storage when big amounts of buffered data "+ "is sent after temporary unavailability of the remote storage") sendTimeout = flagutil.NewArrayDuration("remoteWrite.sendTimeout", "Timeout for sending a single block of data to -remoteWrite.url") @@ -120,15 +119,9 @@ func newClient(argIdx int, remoteWriteURL, sanitizedURL string, fq *persistentqu }, stopCh: make(chan struct{}), } - if bytesPerSec := rateLimit.GetOptionalArg(argIdx); bytesPerSec != "" { - limit, err := strconv.ParseInt(bytesPerSec, 10, 64) - if err != nil { - logger.Fatalf("cannot parse -remoteWrite.rateLimit=%q for -remoteWrite.url=%q: %s", bytesPerSec, sanitizedURL, err) - } - if limit > 0 { - logger.Infof("applying %d bytes per second rate limit for -remoteWrite.url=%q", limit, sanitizedURL) - c.rl.perSecondLimit = limit - } + if bytesPerSec := rateLimit.GetOptionalArgOrDefault(argIdx, 0); bytesPerSec > 0 { + logger.Infof("applying %d bytes per second rate limit for -remoteWrite.url=%q", bytesPerSec, sanitizedURL) + c.rl.perSecondLimit = int64(bytesPerSec) } c.rl.limitReached = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remote_write_rate_limit_reached_total{url=%q}`, c.sanitizedURL)) diff --git a/app/vmagent/remotewrite/pendingseries.go b/app/vmagent/remotewrite/pendingseries.go index 69f249989..a1660bc47 100644 --- a/app/vmagent/remotewrite/pendingseries.go +++ b/app/vmagent/remotewrite/pendingseries.go @@ -7,6 +7,7 @@ import ( "time" "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal" "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime" "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue" @@ -35,9 +36,11 @@ type pendingSeries struct { periodicFlusherWG sync.WaitGroup } -func newPendingSeries(pushBlock func(block
[]byte)) *pendingSeries { +func newPendingSeries(pushBlock func(block []byte), significantFigures, roundDigits int) *pendingSeries { var ps pendingSeries ps.wr.pushBlock = pushBlock + ps.wr.significantFigures = significantFigures + ps.wr.roundDigits = roundDigits ps.stopCh = make(chan struct{}) ps.periodicFlusherWG.Add(1) go func() { @@ -85,9 +88,17 @@ type writeRequest struct { // Move lastFlushTime to the top of the struct in order to guarantee atomic access on 32-bit architectures. lastFlushTime uint64 - wr prompbmarshal.WriteRequest + // pushBlock is called when the write request is ready to be sent. pushBlock func(block []byte) + // How many significant figures must be left before sending the writeRequest to pushBlock. + significantFigures int + + // How many decimal digits after the point must be left before sending the writeRequest to pushBlock. + roundDigits int + + wr prompbmarshal.WriteRequest + tss []prompbmarshal.TimeSeries labels []prompbmarshal.Label @@ -96,6 +107,8 @@ } func (wr *writeRequest) reset() { + // Do not reset pushBlock, significantFigures and roundDigits, since they are re-used. + wr.wr.Timeseries = nil for i := range wr.tss { @@ -114,11 +127,28 @@ func (wr *writeRequest) reset() { func (wr *writeRequest) flush() { wr.wr.Timeseries = wr.tss + wr.adjustSampleValues() atomic.StoreUint64(&wr.lastFlushTime, fasttime.UnixTimestamp()) pushWriteRequest(&wr.wr, wr.pushBlock) wr.reset() } +func (wr *writeRequest) adjustSampleValues() { + samples := wr.samples + if n := wr.significantFigures; n > 0 { + for i := range samples { + s := &samples[i] + s.Value = decimal.RoundToSignificantFigures(s.Value, n) + } + } + if n := wr.roundDigits; n < 100 { + for i := range samples { + s := &samples[i] + s.Value = decimal.RoundToDecimalDigits(s.Value, n) + } + } +} + func (wr *writeRequest) push(src []prompbmarshal.TimeSeries) { tssDst := wr.tss for i := range src { diff --git a/app/vmagent/remotewrite/remotewrite.go b/app/vmagent/remotewrite/remotewrite.go index 80c742a1b..ee3631519 100644 --- a/app/vmagent/remotewrite/remotewrite.go +++ b/app/vmagent/remotewrite/remotewrite.go @@ -7,7 +7,6 @@ import ( "sync/atomic" "github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal" "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/memory" @@ -31,9 +30,13 @@ var ( "for each -remoteWrite.url. When buffer size reaches the configured maximum, then old data is dropped when adding new data to the buffer. "+ "Buffered data is stored in ~500MB chunks, so the minimum practical value for this flag is 500000000. "+ "Disk usage is unlimited if the value is set to 0") - significantFigures = flag.Int("remoteWrite.significantFigures", 0, "The number of significant figures to leave in metric values before writing them to remote storage. "+ "See https://en.wikipedia.org/wiki/Significant_figures . Zero value saves all the significant figures. "+ "This option may be used for increasing on-disk compression level for the stored metrics") + significantFigures = flagutil.NewArrayInt("remoteWrite.significantFigures", "The number of significant figures to leave in metric values before writing them "+ "to remote storage. See https://en.wikipedia.org/wiki/Significant_figures . Zero value saves all the significant figures. "+ "This option may be used for improving data compression for the stored metrics.
See also -remoteWrite.roundDigits") + roundDigits = flagutil.NewArrayInt("remoteWrite.roundDigits", "Round metric values to this number of decimal digits after the point before writing them to remote storage. "+ "Examples: -remoteWrite.roundDigits=2 would round 1.236 to 1.24, while -remoteWrite.roundDigits=-1 would round 126.78 to 130. "+ "By default digits rounding is disabled. Set it to 100 for disabling it for a particular remote storage. "+ "This option may be used for improving data compression for the stored metrics") ) var rwctxs []*remoteWriteCtx @@ -137,17 +140,6 @@ func Stop() { // // Note that wr may be modified by Push due to relabeling and rounding. func Push(wr *prompbmarshal.WriteRequest) { - if *significantFigures > 0 { - // Round values according to significantFigures - for i := range wr.Timeseries { - samples := wr.Timeseries[i].Samples - for j := range samples { - s := &samples[j] - s.Value = decimal.Round(s.Value, *significantFigures) - } - } - } - var rctx *relabelCtx rcs := allRelabelConfigs.Load().(*relabelConfigs) prcsGlobal := rcs.global @@ -213,9 +205,11 @@ func newRemoteWriteCtx(argIdx int, remoteWriteURL string, maxInmemoryBlocks int, return float64(fq.GetInmemoryQueueLen()) }) c := newClient(argIdx, remoteWriteURL, sanitizedURL, fq, *queues) + sf := significantFigures.GetOptionalArgOrDefault(argIdx, 0) + rd := roundDigits.GetOptionalArgOrDefault(argIdx, 100) pss := make([]*pendingSeries, *queues) for i := range pss { - pss[i] = newPendingSeries(fq.MustWriteBlock) + pss[i] = newPendingSeries(fq.MustWriteBlock, sf, rd) } return &remoteWriteCtx{ idx: argIdx, diff --git a/app/vmalert/README.md b/app/vmalert/README.md index aba8231d6..1d0e785e0 100644 --- a/app/vmalert/README.md +++ b/app/vmalert/README.md @@ -12,6 +12,7 @@ rules against configured address. support; * Integration with [Alertmanager](https://github.com/prometheus/alertmanager); * Keeps the alerts [state on restarts](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmalert#alerts-state-on-restarts); +* Graphite datasource can be used for alerting and recording rules. See [these docs](#graphite) for details. * Lightweight without extra dependencies. ### Limitations: @@ -105,7 +106,13 @@ The syntax for alerting rule is following: # The name of the alert. Must be a valid metric name. alert: <string> -# The MetricsQL expression to evaluate. +# Optional type for the rule. Supported values: "graphite", "prometheus". +# By default "prometheus" rule type is used. +[ type: <string> ] + +# The expression to evaluate. The expression language depends on the type value. +# By default MetricsQL expression is used. If type="graphite", then the expression +# must contain a valid Graphite expression. expr: <string> # Alerts are considered firing once they have been returned for this long. @@ -128,7 +135,13 @@ The syntax for recording rules is following: # The name of the time series to output to. Must be a valid metric name. record: <string> -# The MetricsQL expression to evaluate. +# Optional type for the rule. Supported values: "graphite", "prometheus". +# By default "prometheus" rule type is used. +[ type: <string> ] + +# The expression to evaluate. The expression language depends on the type value. +# By default MetricsQL expression is used. If type="graphite", then the expression +# must contain a valid Graphite expression. expr: <string> # Labels to add or overwrite before storing the result. @@ -166,6 +179,13 @@ Used as alert source in AlertManager. * `http://<vmalert-addr>/-/reload` - hot configuration reload.
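For intuition about the rounding flags added in the vmagent remotewrite hunks above, a self-contained sketch that mimics what `-remoteWrite.significantFigures` and `-remoteWrite.roundDigits` do to sample values (a simplified illustration, not the `lib/decimal` implementation):

```go
package main

import (
	"fmt"
	"math"
)

// roundToSignificantFigures keeps n significant figures, e.g. n=3: 1.2345 -> 1.23.
func roundToSignificantFigures(f float64, n int) float64 {
	if f == 0 || math.IsNaN(f) || math.IsInf(f, 0) {
		return f
	}
	scale := math.Pow(10, float64(n)-math.Ceil(math.Log10(math.Abs(f))))
	return math.Round(f*scale) / scale
}

// roundToDecimalDigits keeps n digits after the point; negative n rounds
// before the point, matching the flag description above.
func roundToDecimalDigits(f float64, n int) float64 {
	scale := math.Pow(10, float64(n))
	return math.Round(f*scale) / scale
}

func main() {
	fmt.Println(roundToDecimalDigits(1.236, 2))       // 1.24
	fmt.Println(roundToDecimalDigits(126.78, -1))     // 130
	fmt.Println(roundToSignificantFigures(1.2345, 3)) // 1.23
}
```

Both roundings make consecutive sample values more repetitive, which is why they improve the compression ratio of the stored blocks.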
+### Graphite + +vmalert sends requests to `<-datasource.url>/render?format=json` during evaluation of alerting and recording rules +if the corresponding rule contains `type: "graphite"` config option. It is expected that the `<-datasource.url>/render` +implements [Graphite Render API](https://graphite.readthedocs.io/en/stable/render_api.html) for `format=json`. + + ### Configuration The shortlist of configuration flags is the following: diff --git a/app/vmalert/alerting.go b/app/vmalert/alerting.go index d7e23b234..9eff009bc 100644 --- a/app/vmalert/alerting.go +++ b/app/vmalert/alerting.go @@ -19,6 +19,7 @@ import ( // AlertingRule is basic alert entity type AlertingRule struct { + Type datasource.Type RuleID uint64 Name string Expr string @@ -50,6 +51,7 @@ type alertingRuleMetrics struct { func newAlertingRule(group *Group, cfg config.Rule) *AlertingRule { ar := &AlertingRule{ + Type: cfg.Type, RuleID: cfg.ID, Name: cfg.Alert, Expr: cfg.Expr, @@ -120,7 +122,7 @@ func (ar *AlertingRule) ID() uint64 { // Exec executes AlertingRule expression via the given Querier. // Based on the Querier results AlertingRule maintains notifier.Alerts func (ar *AlertingRule) Exec(ctx context.Context, q datasource.Querier, series bool) ([]prompbmarshal.TimeSeries, error) { - qMetrics, err := q.Query(ctx, ar.Expr) + qMetrics, err := q.Query(ctx, ar.Expr, ar.Type) ar.mu.Lock() defer ar.mu.Unlock() @@ -137,7 +139,7 @@ func (ar *AlertingRule) Exec(ctx context.Context, q datasource.Querier, series b } } - qFn := func(query string) ([]datasource.Metric, error) { return q.Query(ctx, query) } + qFn := func(query string) ([]datasource.Metric, error) { return q.Query(ctx, query, ar.Type) } updated := make(map[uint64]struct{}) // update list of active alerts for _, m := range qMetrics { @@ -310,6 +312,7 @@ func (ar *AlertingRule) RuleAPI() APIAlertingRule { // encode as strings to avoid rounding ID: fmt.Sprintf("%d", ar.ID()), GroupID: fmt.Sprintf("%d", ar.GroupID), + Type: ar.Type.String(), Name: ar.Name, Expression: ar.Expr, For: ar.For.String(), @@ -404,7 +407,7 @@ func (ar *AlertingRule) Restore(ctx context.Context, q datasource.Querier, lookb return fmt.Errorf("querier is nil") } - qFn := func(query string) ([]datasource.Metric, error) { return q.Query(ctx, query) } + qFn := func(query string) ([]datasource.Metric, error) { return q.Query(ctx, query, ar.Type) } // account for external labels in filter var labelsFilter string @@ -417,7 +420,7 @@ func (ar *AlertingRule) Restore(ctx context.Context, q datasource.Querier, lookb // remote write protocol which is used for state persistence in vmalert. 
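// Note: last_over_time() in the query built below is a MetricsQL function; it fetches the most recent alertForStateMetricName sample within the lookback window, which is enough to recover the alert's previous state after a vmalert restart.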
expr := fmt.Sprintf("last_over_time(%s{alertname=%q%s}[%ds])", alertForStateMetricName, ar.Name, labelsFilter, int(lookback.Seconds())) - qMetrics, err := q.Query(ctx, expr) + qMetrics, err := q.Query(ctx, expr, ar.Type) if err != nil { return err } diff --git a/app/vmalert/config/config.go b/app/vmalert/config/config.go index da998ca5c..f639febed 100644 --- a/app/vmalert/config/config.go +++ b/app/vmalert/config/config.go @@ -10,6 +10,8 @@ import ( "strings" "time" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils" "github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate" @@ -21,6 +23,7 @@ import ( // Group contains list of Rules grouped into // entity with one name and evaluation interval type Group struct { + Type datasource.Type `yaml:"type,omitempty"` File string Name string `yaml:"name"` Interval time.Duration `yaml:"interval,omitempty"` @@ -44,6 +47,19 @@ func (g *Group) UnmarshalYAML(unmarshal func(interface{}) error) error { if err != nil { return fmt.Errorf("failed to marshal group configuration for checksum: %w", err) } + // change default value to prometheus datasource. + if g.Type.Get() == "" { + g.Type.Set(datasource.NewPrometheusType()) + } + // update rules with empty type. + for i, r := range g.Rules { + if r.Type.Get() == "" { + r.Type.Set(g.Type) + r.ID = HashRule(r) + g.Rules[i] = r + } + } + h := md5.New() h.Write(b) g.Checksum = fmt.Sprintf("%x", h.Sum(nil)) @@ -58,6 +74,7 @@ func (g *Group) Validate(validateAnnotations, validateExpressions bool) error { if len(g.Rules) == 0 { return fmt.Errorf("group %q can't contain no rules", g.Name) } + uniqueRules := map[uint64]struct{}{} for _, r := range g.Rules { ruleName := r.Record @@ -72,7 +89,13 @@ func (g *Group) Validate(validateAnnotations, validateExpressions bool) error { return fmt.Errorf("invalid rule %q.%q: %w", g.Name, ruleName, err) } if validateExpressions { - if _, err := metricsql.Parse(r.Expr); err != nil { + // its needed only for tests. + // because correct types must be inherited after unmarshalling. + exprValidator := g.Type.ValidateExpr + if r.Type.Get() != "" { + exprValidator = r.Type.ValidateExpr + } + if err := exprValidator(r.Expr); err != nil { return fmt.Errorf("invalid expression for rule %q.%q: %w", g.Name, ruleName, err) } } @@ -92,6 +115,7 @@ func (g *Group) Validate(validateAnnotations, validateExpressions bool) error { // recording rule or alerting rule. 
type Rule struct { ID uint64 + Type datasource.Type `yaml:"type,omitempty"` Record string `yaml:"record,omitempty"` Alert string `yaml:"alert,omitempty"` Expr string `yaml:"expr"` @@ -169,6 +193,7 @@ func HashRule(r Rule) uint64 { h.Write([]byte("alerting")) h.Write([]byte(r.Alert)) } + h.Write([]byte(r.Type.Get())) kv := sortMap(r.Labels) for _, i := range kv { h.Write([]byte(i.key)) diff --git a/app/vmalert/config/config_test.go b/app/vmalert/config/config_test.go index 13dd21731..1cdaa3b9a 100644 --- a/app/vmalert/config/config_test.go +++ b/app/vmalert/config/config_test.go @@ -7,6 +7,8 @@ import ( "testing" "time" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource" + "gopkg.in/yaml.v2" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier" @@ -53,6 +55,10 @@ func TestParseBad(t *testing.T) { []string{"testdata/dir/rules4-bad.rules"}, "either `record` or `alert` must be set", }, + { + []string{"testdata/rules1-bad.rules"}, + "bad graphite expr", + }, } for _, tc := range testCases { _, err := Parse(tc.path, true, true) @@ -215,6 +221,75 @@ func TestGroup_Validate(t *testing.T) { }, expErr: "", }, + { + group: &Group{Name: "test thanos", + Type: datasource.NewRawType("thanos"), + Rules: []Rule{ + {Alert: "alert", Expr: "up == 1", Labels: map[string]string{ + "description": "{{ value|query }}", + }}, + }, + }, + validateExpressions: true, + expErr: "unknown datasource type", + }, + { + group: &Group{Name: "test graphite", + Type: datasource.NewGraphiteType(), + Rules: []Rule{ + {Alert: "alert", Expr: "up == 1", Labels: map[string]string{ + "description": "some-description", + }}, + }, + }, + validateExpressions: true, + expErr: "", + }, + { + group: &Group{Name: "test prometheus", + Type: datasource.NewPrometheusType(), + Rules: []Rule{ + {Alert: "alert", Expr: "up == 1", Labels: map[string]string{ + "description": "{{ value|query }}", + }}, + }, + }, + validateExpressions: true, + expErr: "", + }, + { + group: &Group{ + Name: "test graphite inherit", + Type: datasource.NewGraphiteType(), + Rules: []Rule{ + { + Expr: "sumSeries(time('foo.bar',10))", + For: PromDuration{milliseconds: 10}, + }, + { + Expr: "sum(up == 0 ) by (host)", + Type: datasource.NewPrometheusType(), + }, + }, + }, + }, + { + group: &Group{ + Name: "test graphite prometheus bad expr", + Type: datasource.NewGraphiteType(), + Rules: []Rule{ + { + Expr: "sum(up == 0 ) by (host)", + For: PromDuration{milliseconds: 10}, + }, + { + Expr: "sumSeries(time('foo.bar',10))", + Type: datasource.NewPrometheusType(), + }, + }, + }, + expErr: "invalid rule", + }, } for _, tc := range testCases { err := tc.group.Validate(tc.validateAnnotations, tc.validateExpressions) diff --git a/app/vmalert/config/testdata/dir/rules-update0-good.rules b/app/vmalert/config/testdata/dir/rules-update0-good.rules new file mode 100644 index 000000000..ffbfdf30f --- /dev/null +++ b/app/vmalert/config/testdata/dir/rules-update0-good.rules @@ -0,0 +1,13 @@ +groups: + - name: TestUpdateGroup + interval: 2s + concurrency: 2 + type: prometheus + rules: + - alert: up + expr: up == 0 + for: 30s + - alert: up graphite + expr: filterSeries(time('host.1',20),'>','0') + for: 30s + type: graphite diff --git a/app/vmalert/config/testdata/dir/rules-update1-good.rules b/app/vmalert/config/testdata/dir/rules-update1-good.rules new file mode 100644 index 000000000..0fcad7687 --- /dev/null +++ b/app/vmalert/config/testdata/dir/rules-update1-good.rules @@ -0,0 +1,12 @@ +groups: + - name: TestUpdateGroup + interval: 30s + type: graphite + rules: 
+ - alert: up + expr: filterSeries(time('host.2',20),'>','0') + for: 30s + - alert: up graphite + expr: filterSeries(time('host.1',20),'>','0') + for: 30s + type: graphite diff --git a/app/vmalert/config/testdata/rules1-bad.rules b/app/vmalert/config/testdata/rules1-bad.rules new file mode 100644 index 000000000..37913318d --- /dev/null +++ b/app/vmalert/config/testdata/rules1-bad.rules @@ -0,0 +1,12 @@ +groups: + - name: TestGraphiteBadGroup + interval: 2s + concurrency: 2 + type: graphite + rules: + - alert: Conns + expr: filterSeries(sumSeries(host.receiver.interface.cons),'last','>', 500) by instance + for: 3m + annotations: + summary: Too high connection number for {{$labels.instance}} + description: "It is {{ $value }} connections for {{$labels.instance}}" \ No newline at end of file diff --git a/app/vmalert/config/testdata/rules3-good.rules b/app/vmalert/config/testdata/rules3-good.rules new file mode 100644 index 000000000..3e260b94d --- /dev/null +++ b/app/vmalert/config/testdata/rules3-good.rules @@ -0,0 +1,30 @@ +groups: + - name: TestGroup + interval: 2s + concurrency: 2 + type: graphite + rules: + - alert: Conns + expr: filterSeries(sumSeries(host.receiver.interface.cons),'last','>', 500) + for: 3m + annotations: + summary: Too high connection number for {{$labels.instance}} + description: "It is {{ $value }} connections for {{$labels.instance}}" + - name: TestGroupPromMixed + interval: 2s + concurrency: 2 + type: prometheus + rules: + - alert: Conns + expr: sum(vm_tcplistener_conns) by (instance) > 1 + for: 3m + annotations: + summary: Too high connection number for {{$labels.instance}} + description: "It is {{ $value }} connections for {{$labels.instance}}" + - alert: HostDown + type: graphite + expr: filterSeries(sumSeries(host.receiver.interface.up),'last','=', 0) + for: 3m + annotations: + summary: Too high connection number for {{$labels.instance}} + description: "It is {{ $value }} connections for {{$labels.instance}}" diff --git a/app/vmalert/datasource/datasource.go b/app/vmalert/datasource/datasource.go index 5e6aa32d3..4625d09ee 100644 --- a/app/vmalert/datasource/datasource.go +++ b/app/vmalert/datasource/datasource.go @@ -1,12 +1,14 @@ package datasource -import "context" +import ( + "context" +) // Querier interface wraps Query method which // executes given query and returns list of Metrics // as result type Querier interface { - Query(ctx context.Context, query string) ([]Metric, error) + Query(ctx context.Context, query string, engine Type) ([]Metric, error) } // Metric is the basic entity which should be return by datasource diff --git a/app/vmalert/datasource/type.go b/app/vmalert/datasource/type.go new file mode 100644 index 000000000..10dfc0ca4 --- /dev/null +++ b/app/vmalert/datasource/type.go @@ -0,0 +1,89 @@ +package datasource + +import ( + "fmt" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/graphiteql" + "github.com/VictoriaMetrics/metricsql" +) + +const graphiteType = "graphite" +const prometheusType = "prometheus" + +// Type represents data source type +type Type struct { + name string +} + +// NewPrometheusType returns prometheus datasource type +func NewPrometheusType() Type { + return Type{name: prometheusType} +} + +// NewGraphiteType returns graphite datasource type +func NewGraphiteType() Type { + return Type{name: graphiteType} +} + +// NewRawType returns datasource type from raw string +// without validation. 
+func NewRawType(d string) Type { + return Type{name: d} +} + +// Get returns datasource type +func (t *Type) Get() string { + return t.name +} + +// Set changes datasource type +func (t *Type) Set(d Type) { + t.name = d.name +} + +// String implements String interface with default value. +func (t Type) String() string { + if t.name == "" { + return prometheusType + } + return t.name +} + +// ValidateExpr validates query expression with datasource ql. +func (t *Type) ValidateExpr(expr string) error { + switch t.name { + case graphiteType: + if _, err := graphiteql.Parse(expr); err != nil { + return fmt.Errorf("bad graphite expr: %q, err: %w", expr, err) + } + case "", prometheusType: + if _, err := metricsql.Parse(expr); err != nil { + return fmt.Errorf("bad prometheus expr: %q, err: %w", expr, err) + } + default: + return fmt.Errorf("unknown datasource type=%q", t.name) + } + return nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (t *Type) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + switch s { + case "": + s = prometheusType + case graphiteType, prometheusType: + default: + return fmt.Errorf("unknown datasource type=%q, want %q or %q", s, prometheusType, graphiteType) + } + t.name = s + return nil +} + +// MarshalYAML implements the yaml.Unmarshaler interface. +func (t Type) MarshalYAML() (interface{}, error) { + return t.name, nil +} diff --git a/app/vmalert/datasource/vm.go b/app/vmalert/datasource/vm.go index ae99df121..0c0626fbc 100644 --- a/app/vmalert/datasource/vm.go +++ b/app/vmalert/datasource/vm.go @@ -6,7 +6,6 @@ import ( "fmt" "io/ioutil" "net/http" - "net/url" "strconv" "strings" "time" @@ -46,17 +45,45 @@ func (r response) metrics() ([]Metric, error) { return ms, nil } +type graphiteResponse []graphiteResponseTarget + +type graphiteResponseTarget struct { + Target string `json:"target"` + Tags map[string]string `json:"tags"` + DataPoints [][2]float64 `json:"datapoints"` +} + +func (r graphiteResponse) metrics() []Metric { + var ms []Metric + for _, res := range r { + if len(res.DataPoints) < 1 { + continue + } + var m Metric + // add only last value to the result. 
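+ // Note: the Graphite JSON render API returns each datapoint as a [value, timestamp] pair (e.g. [10, 1611758343]), so last[0] below is the sample value and last[1] the unix timestamp.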
+ last := res.DataPoints[len(res.DataPoints)-1] + m.Value = last[0] + m.Timestamp = int64(last[1]) + for k, v := range res.Tags { + m.AddLabel(k, v) + } + ms = append(ms, m) + } + return ms +} + // VMStorage represents vmstorage entity with ability to read and write metrics type VMStorage struct { c *http.Client - queryURL string + datasourceURL string basicAuthUser string basicAuthPass string lookBack time.Duration queryStep time.Duration } -const queryPath = "/api/v1/query?query=" +const queryPath = "/api/v1/query" +const graphitePath = "/render" // NewVMStorage is a constructor for VMStorage func NewVMStorage(baseURL, basicAuthUser, basicAuthPass string, lookBack time.Duration, queryStep time.Duration, c *http.Client) *VMStorage { @@ -64,26 +91,31 @@ func NewVMStorage(baseURL, basicAuthUser, basicAuthPass string, lookBack time.Du c: c, basicAuthUser: basicAuthUser, basicAuthPass: basicAuthPass, - queryURL: strings.TrimSuffix(baseURL, "/") + queryPath, + datasourceURL: strings.TrimSuffix(baseURL, "/"), lookBack: lookBack, queryStep: queryStep, } } -// Query reads metrics from datasource by given query -func (s *VMStorage) Query(ctx context.Context, query string) ([]Metric, error) { - const ( - statusSuccess, statusError, rtVector = "success", "error", "vector" - ) - q := s.queryURL + url.QueryEscape(query) - if s.lookBack > 0 { - lookBack := time.Now().Add(-s.lookBack) - q += fmt.Sprintf("&time=%d", lookBack.Unix()) +// Query reads metrics from datasource by given query and type +func (s *VMStorage) Query(ctx context.Context, query string, dataSourceType Type) ([]Metric, error) { + switch dataSourceType.name { + case "", prometheusType: + return s.queryDataSource(ctx, query, s.setPrometheusReqParams, parsePrometheusResponse) + case graphiteType: + return s.queryDataSource(ctx, query, s.setGraphiteReqParams, parseGraphiteResponse) + default: + return nil, fmt.Errorf("engine not found: %q", dataSourceType) } - if s.queryStep > 0 { - q += fmt.Sprintf("&step=%s", s.queryStep.String()) - } - req, err := http.NewRequest("POST", q, nil) +} + +func (s *VMStorage) queryDataSource( + ctx context.Context, + query string, + setReqParams func(r *http.Request, query string), + processResponse func(r *http.Request, resp *http.Response, + ) ([]Metric, error)) ([]Metric, error) { + req, err := http.NewRequest("POST", s.datasourceURL, nil) if err != nil { return nil, err } @@ -91,6 +123,7 @@ func (s *VMStorage) Query(ctx context.Context, query string) ([]Metric, error) { if s.basicAuthPass != "" { req.SetBasicAuth(s.basicAuthUser, s.basicAuthPass) } + setReqParams(req, query) resp, err := s.c.Do(req.WithContext(ctx)) if err != nil { return nil, fmt.Errorf("error getting response from %s: %w", req.URL, err) @@ -100,9 +133,46 @@ func (s *VMStorage) Query(ctx context.Context, query string) ([]Metric, error) { body, _ := ioutil.ReadAll(resp.Body) return nil, fmt.Errorf("datasource returns unexpected response code %d for %s. 
Response body %s", resp.StatusCode, req.URL, body) } + return processResponse(req, resp) +} + +func (s *VMStorage) setPrometheusReqParams(r *http.Request, query string) { + r.URL.Path += queryPath + q := r.URL.Query() + q.Set("query", query) + if s.lookBack > 0 { + lookBack := time.Now().Add(-s.lookBack) + q.Set("time", fmt.Sprintf("%d", lookBack.Unix())) + } + if s.queryStep > 0 { + q.Set("step", s.queryStep.String()) + } + r.URL.RawQuery = q.Encode() +} + +func (s *VMStorage) setGraphiteReqParams(r *http.Request, query string) { + r.URL.Path += graphitePath + q := r.URL.Query() + q.Set("format", "json") + q.Set("target", query) + from := "-5min" + if s.lookBack > 0 { + lookBack := time.Now().Add(-s.lookBack) + from = strconv.FormatInt(lookBack.Unix(), 10) + } + q.Set("from", from) + q.Set("until", "now") + r.URL.RawQuery = q.Encode() +} + +const ( + statusSuccess, statusError, rtVector = "success", "error", "vector" +) + +func parsePrometheusResponse(req *http.Request, resp *http.Response) ([]Metric, error) { r := &response{} if err := json.NewDecoder(resp.Body).Decode(r); err != nil { - return nil, fmt.Errorf("error parsing metrics for %s: %w", req.URL, err) + return nil, fmt.Errorf("error parsing prometheus metrics for %s: %w", req.URL, err) } if r.Status == statusError { return nil, fmt.Errorf("response error, query: %s, errorType: %s, error: %s", req.URL, r.ErrorType, r.Error) @@ -115,3 +185,11 @@ func (s *VMStorage) Query(ctx context.Context, query string) ([]Metric, error) { } return r.metrics() } + +func parseGraphiteResponse(req *http.Request, resp *http.Response) ([]Metric, error) { + r := &graphiteResponse{} + if err := json.NewDecoder(resp.Body).Decode(r); err != nil { + return nil, fmt.Errorf("error parsing graphite metrics for %s: %w", req.URL, err) + } + return r.metrics(), nil +} diff --git a/app/vmalert/datasource/vm_test.go b/app/vmalert/datasource/vm_test.go index af99c8421..1afe0d8e3 100644 --- a/app/vmalert/datasource/vm_test.go +++ b/app/vmalert/datasource/vm_test.go @@ -14,6 +14,7 @@ var ( basicAuthName = "foo" basicAuthPass = "bar" query = "vm_rows" + queryRender = "constantLine(10)" ) func TestVMSelectQuery(t *testing.T) { @@ -22,6 +23,13 @@ func TestVMSelectQuery(t *testing.T) { t.Errorf("should not be called") }) c := -1 + mux.HandleFunc("/render", func(w http.ResponseWriter, request *http.Request) { + c++ + switch c { + case 7: + w.Write([]byte(`[{"target":"constantLine(10)","tags":{"name":"constantLine(10)"},"datapoints":[[10,1611758343],[10,1611758373],[10,1611758403]]}]`)) + } + }) mux.HandleFunc("/api/v1/query", func(w http.ResponseWriter, r *http.Request) { c++ if r.Method != http.MethodPost { @@ -62,25 +70,25 @@ func TestVMSelectQuery(t *testing.T) { srv := httptest.NewServer(mux) defer srv.Close() am := NewVMStorage(srv.URL, basicAuthName, basicAuthPass, time.Minute, 0, srv.Client()) - if _, err := am.Query(ctx, query); err == nil { + if _, err := am.Query(ctx, query, NewPrometheusType()); err == nil { t.Fatalf("expected connection error got nil") } - if _, err := am.Query(ctx, query); err == nil { + if _, err := am.Query(ctx, query, NewPrometheusType()); err == nil { t.Fatalf("expected invalid response status error got nil") } - if _, err := am.Query(ctx, query); err == nil { + if _, err := am.Query(ctx, query, NewPrometheusType()); err == nil { t.Fatalf("expected response body error got nil") } - if _, err := am.Query(ctx, query); err == nil { + if _, err := am.Query(ctx, query, NewPrometheusType()); err == nil { t.Fatalf("expected error status got 
nil") } - if _, err := am.Query(ctx, query); err == nil { + if _, err := am.Query(ctx, query, NewPrometheusType()); err == nil { t.Fatalf("expected unknown status got nil") } - if _, err := am.Query(ctx, query); err == nil { + if _, err := am.Query(ctx, query, NewPrometheusType()); err == nil { t.Fatalf("expected non-vector resultType error got nil") } - m, err := am.Query(ctx, query) + m, err := am.Query(ctx, query, NewPrometheusType()) if err != nil { t.Fatalf("unexpected %s", err) } @@ -98,4 +106,22 @@ func TestVMSelectQuery(t *testing.T) { m[0].Labels[0].Name != expected.Labels[0].Name { t.Fatalf("unexpected metric %+v want %+v", m[0], expected) } + m, err = am.Query(ctx, queryRender, NewGraphiteType()) + if err != nil { + t.Fatalf("unexpected %s", err) + } + if len(m) != 1 { + t.Fatalf("expected 1 metric got %d in %+v", len(m), m) + } + expected = Metric{ + Labels: []Label{{Value: "constantLine(10)", Name: "name"}}, + Timestamp: 1611758403, + Value: 10, + } + if m[0].Timestamp != expected.Timestamp && + m[0].Value != expected.Value && + m[0].Labels[0].Value != expected.Labels[0].Value && + m[0].Labels[0].Name != expected.Labels[0].Name { + t.Fatalf("unexpected metric %+v want %+v", m[0], expected) + } } diff --git a/app/vmalert/group.go b/app/vmalert/group.go index f2a1b5bd7..e8a73b25f 100644 --- a/app/vmalert/group.go +++ b/app/vmalert/group.go @@ -22,6 +22,7 @@ type Group struct { Name string File string Rules []Rule + Type datasource.Type Interval time.Duration Concurrency int Checksum string @@ -50,6 +51,7 @@ func newGroupMetrics(name, file string) *groupMetrics { func newGroup(cfg config.Group, defaultInterval time.Duration, labels map[string]string) *Group { g := &Group{ + Type: cfg.Type, Name: cfg.Name, File: cfg.File, Interval: cfg.Interval, @@ -99,6 +101,7 @@ func (g *Group) ID() uint64 { hash.Write([]byte(g.File)) hash.Write([]byte("\xff")) hash.Write([]byte(g.Name)) + hash.Write([]byte(g.Type.Get())) return hash.Sum64() } @@ -157,6 +160,7 @@ func (g *Group) updateWith(newGroup *Group) error { for _, nr := range rulesRegistry { newRules = append(newRules, nr) } + g.Type = newGroup.Type g.Concurrency = newGroup.Concurrency g.Checksum = newGroup.Checksum g.Rules = newRules diff --git a/app/vmalert/helpers_test.go b/app/vmalert/helpers_test.go index e4fcc1d59..3f879d466 100644 --- a/app/vmalert/helpers_test.go +++ b/app/vmalert/helpers_test.go @@ -38,7 +38,7 @@ func (fq *fakeQuerier) add(metrics ...datasource.Metric) { fq.Unlock() } -func (fq *fakeQuerier) Query(_ context.Context, _ string) ([]datasource.Metric, error) { +func (fq *fakeQuerier) Query(_ context.Context, _ string, _ datasource.Type) ([]datasource.Metric, error) { fq.Lock() defer fq.Unlock() if fq.err != nil { diff --git a/app/vmalert/manager.go b/app/vmalert/manager.go index 3abdd1340..baaf0effb 100644 --- a/app/vmalert/manager.go +++ b/app/vmalert/manager.go @@ -142,6 +142,7 @@ func (g *Group) toAPI() APIGroup { // encode as string to avoid rounding ID: fmt.Sprintf("%d", g.ID()), Name: g.Name, + Type: g.Type.String(), File: g.File, Interval: g.Interval.String(), Concurrency: g.Concurrency, diff --git a/app/vmalert/manager_test.go b/app/vmalert/manager_test.go index a3cd3d31f..8762ef6cb 100644 --- a/app/vmalert/manager_test.go +++ b/app/vmalert/manager_test.go @@ -9,6 +9,8 @@ import ( "testing" "time" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier" ) @@ -106,6 +108,18 @@ func TestManagerUpdate(t *testing.T) { Name: 
"ExampleAlertAlwaysFiring", Expr: "sum by(job) (up == 1)", } + ExampleAlertGraphite = &AlertingRule{ + Name: "up graphite", + Expr: "filterSeries(time('host.1',20),'>','0')", + Type: datasource.NewGraphiteType(), + For: defaultEvalInterval, + } + ExampleAlertGraphite2 = &AlertingRule{ + Name: "up", + Expr: "filterSeries(time('host.2',20),'>','0')", + Type: datasource.NewGraphiteType(), + For: defaultEvalInterval, + } ) testCases := []struct { @@ -122,6 +136,7 @@ func TestManagerUpdate(t *testing.T) { { File: "config/testdata/dir/rules1-good.rules", Name: "duplicatedGroupDiffFiles", + Type: datasource.NewPrometheusType(), Interval: defaultEvalInterval, Rules: []Rule{ &AlertingRule{ @@ -146,12 +161,14 @@ func TestManagerUpdate(t *testing.T) { { File: "config/testdata/rules0-good.rules", Name: "groupGorSingleAlert", + Type: datasource.NewPrometheusType(), Rules: []Rule{VMRows}, Interval: defaultEvalInterval, }, { File: "config/testdata/rules0-good.rules", Interval: defaultEvalInterval, + Type: datasource.NewPrometheusType(), Name: "TestGroup", Rules: []Rule{ Conns, ExampleAlertAlwaysFiring, @@ -166,13 +183,16 @@ func TestManagerUpdate(t *testing.T) { { File: "config/testdata/rules0-good.rules", Name: "groupGorSingleAlert", + Type: datasource.NewPrometheusType(), Interval: defaultEvalInterval, Rules: []Rule{VMRows}, }, { File: "config/testdata/rules0-good.rules", Interval: defaultEvalInterval, - Name: "TestGroup", Rules: []Rule{ + Name: "TestGroup", + Type: datasource.NewPrometheusType(), + Rules: []Rule{ Conns, ExampleAlertAlwaysFiring, }}, @@ -186,12 +206,14 @@ func TestManagerUpdate(t *testing.T) { { File: "config/testdata/rules0-good.rules", Name: "groupGorSingleAlert", + Type: datasource.NewPrometheusType(), Interval: defaultEvalInterval, Rules: []Rule{VMRows}, }, { File: "config/testdata/rules0-good.rules", Interval: defaultEvalInterval, + Type: datasource.NewPrometheusType(), Name: "TestGroup", Rules: []Rule{ Conns, ExampleAlertAlwaysFiring, @@ -199,6 +221,23 @@ func TestManagerUpdate(t *testing.T) { }, }, }, + { + name: "update prometheus to graphite type", + initPath: "config/testdata/dir/rules-update0-good.rules", + updatePath: "config/testdata/dir/rules-update1-good.rules", + want: []*Group{ + { + File: "config/testdata/dir/rules-update1-good.rules", + Interval: defaultEvalInterval, + Type: datasource.NewGraphiteType(), + Name: "TestUpdateGroup", + Rules: []Rule{ + ExampleAlertGraphite2, + ExampleAlertGraphite, + }, + }, + }, + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { diff --git a/app/vmalert/recording.go b/app/vmalert/recording.go index 89f0c57e1..4f075cdf1 100644 --- a/app/vmalert/recording.go +++ b/app/vmalert/recording.go @@ -18,6 +18,7 @@ import ( // to evaluate configured Expression and // return TimeSeries as result. 
type RecordingRule struct { + Type datasource.Type RuleID uint64 Name string Expr string @@ -53,6 +54,7 @@ func (rr *RecordingRule) ID() uint64 { func newRecordingRule(group *Group, cfg config.Rule) *RecordingRule { rr := &RecordingRule{ + Type: cfg.Type, RuleID: cfg.ID, Name: cfg.Record, Expr: cfg.Expr, @@ -60,6 +62,7 @@ func newRecordingRule(group *Group, cfg config.Rule) *RecordingRule { GroupID: group.ID(), metrics: &recordingRuleMetrics{}, } + labels := fmt.Sprintf(`recording=%q, group=%q, id="%d"`, rr.Name, group.Name, rr.ID()) rr.metrics.errors = getOrCreateGauge(fmt.Sprintf(`vmalert_recording_rules_error{%s}`, labels), func() float64 { @@ -84,8 +87,7 @@ func (rr *RecordingRule) Exec(ctx context.Context, q datasource.Querier, series return nil, nil } - qMetrics, err := q.Query(ctx, rr.Expr) - + qMetrics, err := q.Query(ctx, rr.Expr, rr.Type) rr.mu.Lock() defer rr.mu.Unlock() @@ -162,6 +164,7 @@ func (rr *RecordingRule) RuleAPI() APIRecordingRule { ID: fmt.Sprintf("%d", rr.ID()), GroupID: fmt.Sprintf("%d", rr.GroupID), Name: rr.Name, + Type: rr.Type.String(), Expression: rr.Expr, LastError: lastErr, LastExec: rr.lastExecTime, diff --git a/app/vmalert/web_types.go b/app/vmalert/web_types.go index b26a20d2c..ca3cbdd05 100644 --- a/app/vmalert/web_types.go +++ b/app/vmalert/web_types.go @@ -21,6 +21,7 @@ type APIAlert struct { // APIGroup represents Group for WEB view type APIGroup struct { Name string `json:"name"` + Type string `json:"type"` ID string `json:"id"` File string `json:"file"` Interval string `json:"interval"` @@ -33,6 +34,7 @@ type APIGroup struct { type APIAlertingRule struct { ID string `json:"id"` Name string `json:"name"` + Type string `json:"type"` GroupID string `json:"group_id"` Expression string `json:"expression"` For string `json:"for"` @@ -46,6 +48,7 @@ type APIAlertingRule struct { type APIRecordingRule struct { ID string `json:"id"` Name string `json:"name"` + Type string `json:"type"` GroupID string `json:"group_id"` Expression string `json:"expression"` LastError string `json:"last_error"` diff --git a/app/vmauth/README.md b/app/vmauth/README.md index 017a9278e..aeed7f1e2 100644 --- a/app/vmauth/README.md +++ b/app/vmauth/README.md @@ -5,7 +5,7 @@ It reads username and password from [Basic Auth headers](https://en.wikipedia.or matches them against configs pointed by `-auth.config` command-line flag and proxies incoming HTTP requests to the configured per-user `url_prefix` on successful match. -### Quick start +## Quick start Just download `vmutils-*` archive from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), unpack it and pass the following flag to `vmauth` binary in order to start authorizing and routing requests: @@ -26,7 +26,7 @@ Pass `-help` to `vmauth` in order to see all the supported command-line flags wi Feel free [contacting us](mailto:info@victoriametrics.com) if you need customized auth proxy for VictoriaMetrics with the support of LDAP, SSO, RBAC, SAML, accounting, limits, etc. -### Auth config +## Auth config Auth config is represented in the following simple `yml` format: @@ -68,7 +68,7 @@ The config may contain `%{ENV_VAR}` placeholders, which are substituted by the c This may be useful for passing secrets to the config. -### Security +## Security Do not transfer Basic Auth headers in plaintext over untrusted networks. Enable https. This can be done by passing the following `-tls*` command-line flags to `vmauth`: @@ -84,30 +84,30 @@ Do not transfer Basic Auth headers in plaintext over untrusted networks. 
Enable Alternatively, [https termination proxy](https://en.wikipedia.org/wiki/TLS_termination_proxy) may be put in front of `vmauth`. -### Monitoring +## Monitoring `vmauth` exports various metrics in Prometheus exposition format at `http://vmauth-host:8427/metrics` page. It is recommended setting up regular scraping of this page either via [vmagent](https://victoriametrics.github.io/vmagent.html) or via Prometheus, so the exported metrics could be analyzed later. -### How to build from sources +## How to build from sources It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmauth` is located in `vmutils-*` archives there. -#### Development build +### Development build 1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13. 2. Run `make vmauth` from the root folder of the repository. It builds `vmauth` binary and puts it into the `bin` folder. -#### Production build +### Production build 1. [Install docker](https://docs.docker.com/install/). 2. Run `make vmauth-prod` from the root folder of the repository. It builds `vmauth-prod` binary and puts it into the `bin` folder. -#### Building docker images +### Building docker images Run `make package-vmauth`. It builds `victoriametrics/vmauth:<PKG_TAG>` docker image locally. `<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository. @@ -121,7 +121,7 @@ ROOT_IMAGE=scratch make package-vmauth ``` -### Profiling +## Profiling `vmauth` provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs): @@ -142,7 +142,7 @@ The command for collecting CPU profile waits for 30 seconds before returning. The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof). -### Advanced usage +## Advanced usage Pass `-help` command-line arg to `vmauth` in order to see all the configuration options: diff --git a/app/vmbackup/README.md b/app/vmbackup/README.md index e3ae3f22b..8f62311c2 100644 --- a/app/vmbackup/README.md +++ b/app/vmbackup/README.md @@ -23,9 +23,9 @@ See also [vmbackuper](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/ creation of hourly, daily, weekly and monthly backups. -### Use cases +## Use cases -#### Regular backups +### Regular backups Regular backup can be performed with the following command: @@ -40,7 +40,7 @@ vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshotName=<local-snapshot> -dst=gcs://<bucket>/<path/to/new/backup> ``` `<bucket>/<path/to/new/backup>` is the destination path where new backup will be placed. -#### Regular backups with server-side copy from existing backup +### Regular backups with server-side copy from existing backup If the destination GCS bucket already contains the previous backup at `-origin` path, then new backup can be sped up with the following command: @@ -52,7 +52,7 @@ vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshotName=<local-snapshot> -dst=gcs://<bucket>/<path/to/new/backup> -origin=gcs://<bucket>/<path/to/existing/backup> Run `make package-vmbackup`. It builds `victoriametrics/vmbackup:<PKG_TAG>` docker image locally. `<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository. diff --git a/app/vmctl/Makefile b/app/vmctl/Makefile new file mode 100644 index 000000000..b039877ba --- /dev/null +++ b/app/vmctl/Makefile @@ -0,0 +1,73 @@ +# All these commands must run from repository root.
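+# The targets below follow the shared conventions of the other app/*/Makefile files:
+# plain targets build locally, -prod targets build via Docker,
+# and package-* targets produce Docker images.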
+
+vmctl:
+	APP_NAME=vmctl $(MAKE) app-local
+
+vmctl-race:
+	APP_NAME=vmctl RACE=-race $(MAKE) app-local
+
+vmctl-prod:
+	APP_NAME=vmctl $(MAKE) app-via-docker
+
+vmctl-pure-prod:
+	APP_NAME=vmctl $(MAKE) app-via-docker-pure
+
+vmctl-amd64-prod:
+	APP_NAME=vmctl $(MAKE) app-via-docker-amd64
+
+vmctl-arm-prod:
+	APP_NAME=vmctl $(MAKE) app-via-docker-arm
+
+vmctl-arm64-prod:
+	APP_NAME=vmctl $(MAKE) app-via-docker-arm64
+
+vmctl-ppc64le-prod:
+	APP_NAME=vmctl $(MAKE) app-via-docker-ppc64le
+
+vmctl-386-prod:
+	APP_NAME=vmctl $(MAKE) app-via-docker-386
+
+package-vmctl:
+	APP_NAME=vmctl $(MAKE) package-via-docker
+
+package-vmctl-pure:
+	APP_NAME=vmctl $(MAKE) package-via-docker-pure
+
+package-vmctl-amd64:
+	APP_NAME=vmctl $(MAKE) package-via-docker-amd64
+
+package-vmctl-arm:
+	APP_NAME=vmctl $(MAKE) package-via-docker-arm
+
+package-vmctl-arm64:
+	APP_NAME=vmctl $(MAKE) package-via-docker-arm64
+
+package-vmctl-ppc64le:
+	APP_NAME=vmctl $(MAKE) package-via-docker-ppc64le
+
+package-vmctl-386:
+	APP_NAME=vmctl $(MAKE) package-via-docker-386
+
+publish-vmctl:
+	APP_NAME=vmctl $(MAKE) publish-via-docker
+
+vmctl-amd64:
+	CGO_ENABLED=1 GOARCH=amd64 $(MAKE) vmctl-local-with-goarch
+
+vmctl-arm:
+	CGO_ENABLED=0 GOARCH=arm $(MAKE) vmctl-local-with-goarch
+
+vmctl-arm64:
+	CGO_ENABLED=0 GOARCH=arm64 $(MAKE) vmctl-local-with-goarch
+
+vmctl-ppc64le:
+	CGO_ENABLED=0 GOARCH=ppc64le $(MAKE) vmctl-local-with-goarch
+
+vmctl-386:
+	CGO_ENABLED=0 GOARCH=386 $(MAKE) vmctl-local-with-goarch
+
+vmctl-local-with-goarch:
+	APP_NAME=vmctl $(MAKE) app-local-with-goarch
+
+vmctl-pure:
+	APP_NAME=vmctl $(MAKE) app-local-pure
diff --git a/app/vmctl/README.md b/app/vmctl/README.md
new file mode 100644
index 000000000..52563a0f7
--- /dev/null
+++ b/app/vmctl/README.md
@@ -0,0 +1,473 @@
+# vmctl - Victoria metrics command-line tool
+
+Features:
+- [x] Prometheus: migrate data from Prometheus to VictoriaMetrics using snapshot API
+- [x] Thanos: migrate data from Thanos to VictoriaMetrics
+- [ ] ~~Prometheus: migrate data from Prometheus to VictoriaMetrics by query~~(discarded)
+- [x] InfluxDB: migrate data from InfluxDB to VictoriaMetrics
+- [ ] Storage Management: data re-balancing between nodes
+
+# Table of contents
+
+* [Articles](#articles)
+* [How to build](#how-to-build)
+* [Migrating data from InfluxDB 1.x](#migrating-data-from-influxdb-1x)
+   * [Data mapping](#data-mapping)
+   * [Configuration](#configuration)
+   * [Filtering](#filtering)
+* [Migrating data from InfluxDB 2.x](#migrating-data-from-influxdb-2x)
+* [Migrating data from Prometheus](#migrating-data-from-prometheus)
+   * [Data mapping](#data-mapping-1)
+   * [Configuration](#configuration-1)
+   * [Filtering](#filtering-1)
+* [Migrating data from Thanos](#migrating-data-from-thanos)
+   * [Current data](#current-data)
+   * [Historical data](#historical-data)
+* [Migrating data from VictoriaMetrics](#migrating-data-from-victoriametrics)
+   * [Native protocol](#native-protocol)
+* [Tuning](#tuning)
+   * [Influx mode](#influx-mode)
+   * [Prometheus mode](#prometheus-mode)
+   * [VictoriaMetrics importer](#victoriametrics-importer)
+   * [Importer stats](#importer-stats)
+   * [Silent mode](#silent-mode)
+   * [Significant figures](#significant-figures)
+   * [Adding extra labels](#adding-extra-labels)
+
+
+## Articles
+
+* [How to migrate data from Prometheus](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-d44a6728f043)
+* [How to migrate data from Prometheus. 
Filtering and modifying time series](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-filtering-and-modifying-time-series-6d40cea4bf21)
+
+## How to build
+
+It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmctl` is located in `vmutils-*` archives there.
+
+
+### Development build
+
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
+2. Run `make vmctl` from the root folder of the repository.
+   It builds `vmctl` binary and puts it into the `bin` folder.
+
+### Production build
+
+1. [Install docker](https://docs.docker.com/install/).
+2. Run `make vmctl-prod` from the root folder of the repository.
+   It builds `vmctl-prod` binary and puts it into the `bin` folder.
+
+### Building docker images
+
+Run `make package-vmctl`. It builds `victoriametrics/vmctl:<PKG_TAG>` docker image locally.
+`<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository.
+The `<PKG_TAG>` may be manually set via `PKG_TAG=foobar make package-vmctl`.
+
+The base docker image is [alpine](https://hub.docker.com/_/alpine) but it is possible to use any other base image
+by setting it via `<ROOT_IMAGE>` environment variable. For example, the following command builds the image on top of [scratch](https://hub.docker.com/_/scratch) image:
+
+```bash
+ROOT_IMAGE=scratch make package-vmctl
+```
+
+### ARM build
+
+ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://blog.cloudflare.com/arm-takes-wing/).
+
+#### Development ARM build
+
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
+2. Run `make vmctl-arm` or `make vmctl-arm64` from the root folder of the repository.
+   It builds `vmctl-arm` or `vmctl-arm64` binary respectively and puts it into the `bin` folder.
+
+#### Production ARM build
+
+1. [Install docker](https://docs.docker.com/install/).
+2. Run `make vmctl-arm-prod` or `make vmctl-arm64-prod` from the root folder of the repository.
+   It builds `vmctl-arm-prod` or `vmctl-arm64-prod` binary respectively and puts it into the `bin` folder.
+
+
+## Migrating data from InfluxDB (1.x)
+
+`vmctl` supports the `influx` mode to migrate data from InfluxDB to VictoriaMetrics time-series database.
+
+See `./vmctl influx --help` for details and the full list of flags.
+
+To use the migration tool, please specify the InfluxDB address `--influx-addr`, the database `--influx-database` and the VictoriaMetrics address `--vm-addr`.
+Flag `--vm-addr` for single-node VM is usually equal to `--httpListenAddr`, and for cluster version
+is equal to the `--httpListenAddr` flag of the VMInsert component. Please note that vmctl performs an initial readiness check for the given address
+by checking the `/health` endpoint. For cluster version it is additionally required to specify the `--vm-account-id` flag.
+See more details for cluster version [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
+
+As soon as the required flags are provided and all endpoints are accessible, `vmctl` will start the InfluxDB scheme exploration.
+Basically, it just fetches all fields and timeseries from the provided database and builds up a registry of all available timeseries.
+Then `vmctl` sends fetch requests for each timeseries to InfluxDB one by one and passes the results to the VM importer.
+The VM importer then accumulates received samples in batches and sends import requests to VM.
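+
+The fan-out behind this pipeline can be sketched in a few lines of Go. This is a minimal illustration only, with hypothetical `fetch` and `importBatch` helpers standing in for the real InfluxDB client and VM importer:
+
+```go
+package main
+
+import (
+	"fmt"
+	"sync"
+)
+
+// series is a stand-in for one explored InfluxDB time series.
+type series struct{ measurement, field string }
+
+// fetch and importBatch are hypothetical helpers standing in for the real
+// InfluxDB fetch request and the VM import request.
+func fetch(s series) []float64                  { return []float64{1, 2, 3} }
+func importBatch(name string, values []float64) { fmt.Println("import:", name, values) }
+
+func main() {
+	explored := []series{{"cpu", "usage"}, {"mem", "free"}}
+
+	seriesCh := make(chan series)
+	var wg sync.WaitGroup
+	const concurrency = 2 // cf. the --influx-concurrency flag
+	for i := 0; i < concurrency; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for s := range seriesCh {
+				// one fetch request per time series; results go to the importer
+				importBatch(s.measurement+"_"+s.field, fetch(s))
+			}
+		}()
+	}
+	for _, s := range explored {
+		seriesCh <- s
+	}
+	close(seriesCh)
+	wg.Wait()
+}
+```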
+
+The importing process example for local installation of InfluxDB (`http://localhost:8086`)
+and single-node VictoriaMetrics (`http://localhost:8428`):
+```
+./vmctl influx --influx-database benchmark
+InfluxDB import mode
+2020/01/18 20:47:11 Exploring scheme for database "benchmark"
+2020/01/18 20:47:11 fetching fields: command: "show field keys"; database: "benchmark"; retention: "autogen"
+2020/01/18 20:47:11 found 10 fields
+2020/01/18 20:47:11 fetching series: command: "show series "; database: "benchmark"; retention: "autogen"
+Found 40000 timeseries to import. Continue? [Y/n] y
+40000 / 40000 [-----------------------------------------------------------------------------------------------------------------------------------------------] 100.00% 21 p/s
+2020/01/18 21:19:00 Import finished!
+2020/01/18 21:19:00 VictoriaMetrics importer stats:
+ idle duration: 13m51.461434876s;
+ time spent while importing: 17m56.923899847s;
+ total samples: 345600000;
+ samples/s: 320914.04;
+ total bytes: 5.9 GB;
+ bytes/s: 5.4 MB;
+ import requests: 40001;
+2020/01/18 21:19:00 Total time: 31m48.467044016s
+```
+
+### Data mapping
+
+Vmctl maps Influx data the same way as VictoriaMetrics does by using the following rules:
+
+* `influx-database` arg is mapped into `db` label value unless `db` tag exists in the Influx line.
+* Field names are mapped to time series names prefixed with the `{measurement}{separator}` value,
+where `{separator}` equals `_` by default.
+It can be changed with the `--influx-measurement-field-separator` command-line flag.
+* Field values are mapped to time series values.
+* Tags are mapped to Prometheus labels format as-is.
+
+For example, the following Influx line:
+```
+foo,tag1=value1,tag2=value2 field1=12,field2=40
+```
+
+is converted into the following Prometheus format data points:
+```
+foo_field1{tag1="value1", tag2="value2"} 12
+foo_field2{tag1="value1", tag2="value2"} 40
+```
+
+### Configuration
+
+The configuration flags should contain self-explanatory descriptions.
+
+### Filtering
+
+The filtering consists of two parts: by timeseries and by time.
+The first step is to select all available timeseries
+for the given database and retention policy. The user may specify an additional filtering
+condition via the `--influx-filter-series` flag. For example:
+```
+./vmctl influx --influx-database benchmark \
+  --influx-filter-series "on benchmark from cpu where hostname='host_1703'"
+InfluxDB import mode
+2020/01/26 14:23:29 Exploring scheme for database "benchmark"
+2020/01/26 14:23:29 fetching fields: command: "show field keys"; database: "benchmark"; retention: "autogen"
+2020/01/26 14:23:29 found 12 fields
+2020/01/26 14:23:29 fetching series: command: "show series on benchmark from cpu where hostname='host_1703'"; database: "benchmark"; retention: "autogen"
+Found 10 timeseries to import. Continue? [Y/n]
+```
+The resulting series select query would be the following:
+ `fetching series: command: "show series on benchmark from cpu where hostname='host_1703'"; database: "benchmark"; retention: "autogen"`
+
+The second part is a time filter, applied when fetching the datapoints from Influx.
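+
+Under the hood, the configured time filter simply becomes an extra `where` condition on each fetch query (see the `timeFilter` helper in `app/vmctl/influx/influx.go` below). For a hypothetical series `cpu.value` the generated fetch query would look like:
+```
+select "value" from "cpu" where time >= '2020-01-01T10:07:00Z' and time <= '2020-01-01T15:07:00Z'
+```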
+Time filtering may be configured with two flags:
+* --influx-filter-time-start
+* --influx-filter-time-end
+Here's an example of importing timeseries for one day only:
+`./vmctl influx --influx-database benchmark --influx-filter-series "where hostname='host_1703'" --influx-filter-time-start "2020-01-01T10:07:00Z" --influx-filter-time-end "2020-01-01T15:07:00Z"`
+
+Please see more about time filtering [here](https://docs.influxdata.com/influxdb/v1.7/query_language/schema_exploration#filter-meta-queries-by-time).
+
+## Migrating data from InfluxDB (2.x)
+
+Migrating data from InfluxDB v2.x is not supported yet ([#32](https://github.com/VictoriaMetrics/vmctl/issues/32)).
+You may find a 3rd-party solution useful for this - https://github.com/jonppe/influx_to_victoriametrics.
+
+
+## Migrating data from Prometheus
+
+`vmctl` supports the `prometheus` mode for migrating data from Prometheus to VictoriaMetrics time-series database.
+Migration is based on reading a Prometheus snapshot, which is basically a set of hard-links to Prometheus data files.
+
+See `./vmctl prometheus --help` for details and the full list of flags.
+
+To use the migration tool, please specify the path to the Prometheus snapshot `--prom-snapshot` and the VictoriaMetrics address `--vm-addr`.
+More about Prometheus snapshots may be found [here](https://www.robustperception.io/taking-snapshots-of-prometheus-data).
+Flag `--vm-addr` for single-node VM is usually equal to `--httpListenAddr`, and for cluster version
+is equal to the `--httpListenAddr` flag of the VMInsert component. Please note that vmctl performs an initial readiness check for the given address
+by checking the `/health` endpoint. For cluster version it is additionally required to specify the `--vm-account-id` flag.
+See more details for cluster version [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
+
+As soon as the required flags are provided and all endpoints are accessible, `vmctl` will start the Prometheus snapshot exploration.
+Basically, it just fetches all available blocks in the provided snapshot and reads their metadata. It also does initial filtering by time
+if the flags `--prom-filter-time-start` or `--prom-filter-time-end` were set. The exploration procedure prints some stats from the read blocks.
+Please note that these stats do not take into account timeseries or samples filtering; this will be done during the importing process.
+
+The importing process takes the snapshot blocks revealed by the Explore procedure and processes them one by one,
+accumulating timeseries and samples. Please note that `vmctl` relies on responses from Influx at this stage,
+so ensure that Explore queries are executed without errors or limits. Please see this
+[issue](https://github.com/VictoriaMetrics/vmctl/issues/30) for details.
+The data is processed in chunks and then sent to VM.
+
+The importing process example for local installation of Prometheus
+and single-node VictoriaMetrics (`http://localhost:8428`):
+```
+./vmctl prometheus --prom-snapshot=/path/to/snapshot \
+  --vm-concurrency=1 \
+  --vm-batch-size=200000 \
+  --prom-concurrency=3
+Prometheus import mode
+Prometheus snapshot stats:
+ blocks found: 14;
+ blocks skipped: 0;
+ min time: 1581288163058 (2020-02-09T22:42:43Z);
+ max time: 1582409128139 (2020-02-22T22:05:28Z);
+ samples: 32549106;
+ series: 27289.
+Found 14 blocks to import. Continue? [Y/n] y
+14 / 14 [-------------------------------------------------------------------------------------------] 100.00% 0 p/s
+2020/02/23 15:50:03 Import finished!
+2020/02/23 15:50:03 VictoriaMetrics importer stats:
+ idle duration: 6.152953029s;
+ time spent while importing: 44.908522491s;
+ total samples: 32549106;
+ samples/s: 724786.84;
+ total bytes: 669.1 MB;
+ bytes/s: 14.9 MB;
+ import requests: 323;
+ import requests retries: 0;
+2020/02/23 15:50:03 Total time: 51.077451066s
+```
+
+### Data mapping
+
+VictoriaMetrics has a very similar data model to Prometheus and supports [RemoteWrite integration](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
+So no data changes will be applied.
+
+### Configuration
+
+The configuration flags should contain self-explanatory descriptions.
+
+### Filtering
+
+The filtering consists of two parts: by timeseries and by time.
+
+Filtering by time may be configured via the flags `--prom-filter-time-start` and `--prom-filter-time-end`
+in RFC3339 format. This filter is applied twice: to drop blocks out of range and to filter timeseries in blocks with
+overlapping time range.
+
+Example of applying a time filter:
+```
+./vmctl prometheus --prom-snapshot=/path/to/snapshot \
+  --prom-filter-time-start=2020-02-07T00:07:01Z \
+  --prom-filter-time-end=2020-02-11T00:07:01Z
+Prometheus import mode
+Prometheus snapshot stats:
+ blocks found: 2;
+ blocks skipped: 12;
+ min time: 1581288163058 (2020-02-09T22:42:43Z);
+ max time: 1581328800000 (2020-02-10T10:00:00Z);
+ samples: 1657698;
+ series: 3930.
+Found 2 blocks to import. Continue? [Y/n] y
+```
+
+Please notice that the total amount of blocks in the provided snapshot is 14, but only 2 of them were in the provided
+time range. So the other 12 blocks were marked as `skipped`. The amount of samples and series is not taken into account,
+since this is a heavy operation and will be done during the import process.
+
+
+Filtering by timeseries is configured with the following flags:
+* `--prom-filter-label` - the label name, e.g. `__name__` or `instance`;
+* `--prom-filter-label-value` - the regular expression to filter the label value. By default it matches all (`.*`).
+
+For example:
+```
+./vmctl prometheus --prom-snapshot=/path/to/snapshot \
+  --prom-filter-label="__name__" \
+  --prom-filter-label-value="promhttp.*" \
+  --prom-filter-time-start=2020-02-07T00:07:01Z \
+  --prom-filter-time-end=2020-02-11T00:07:01Z
+Prometheus import mode
+Prometheus snapshot stats:
+ blocks found: 2;
+ blocks skipped: 12;
+ min time: 1581288163058 (2020-02-09T22:42:43Z);
+ max time: 1581328800000 (2020-02-10T10:00:00Z);
+ samples: 1657698;
+ series: 3930.
+Found 2 blocks to import. Continue? [Y/n] y
+14 / 14 [------------------------------------------------------------------------------------------------------------------------------------------------------] 100.00% ? p/s
+2020/02/23 15:51:07 Import finished!
+2020/02/23 15:51:07 VictoriaMetrics importer stats:
+ idle duration: 0s;
+ time spent while importing: 37.415461ms;
+ total samples: 10128;
+ samples/s: 270690.24;
+ total bytes: 195.2 kB;
+ bytes/s: 5.2 MB;
+ import requests: 2;
+ import requests retries: 0;
+2020/02/23 15:51:07 Total time: 7.153158218s
+```
+
+## Migrating data from Thanos
+
+Thanos uses the same storage engine as Prometheus and the data layout on-disk should be the same. That means
+`vmctl` in `prometheus` mode may be used for Thanos historical data migration as well.
+These instructions may vary based on the details of your Thanos configuration.
+Please read carefully and verify as you go. We assume you're using Thanos Sidecar on your Prometheus pods,
+and that you have a separate Thanos Store installation.
+
+### Current data
+
+1. For now, keep your Thanos Sidecar and Thanos-related Prometheus configuration, but add this to also stream
+   metrics to VictoriaMetrics:
+   ```
+   remote_write:
+   - url: http://victoria-metrics:8428/api/v1/write
+   ```
+2. Make sure VM is running, of course. Now check the logs to make sure that Prometheus is sending and VM is receiving.
+   In Prometheus, make sure there are no errors. On the VM side, you should see messages like this:
+   ```
+   2020-04-27T18:38:46.474Z	info	VictoriaMetrics/lib/storage/partition.go:207	creating a partition "2020_04" with smallPartsPath="/victoria-metrics-data/data/small/2020_04", bigPartsPath="/victoria-metrics-data/data/big/2020_04"
+   2020-04-27T18:38:46.506Z	info	VictoriaMetrics/lib/storage/partition.go:222	partition "2020_04" has been created
+   ```
+3. Now just wait. Within two hours, Prometheus should finish its current data file and hand it off to Thanos Store for long-term
+   storage.
+
+### Historical data
+
+Let's assume your data is stored on S3 served by minio. You first need to copy that out to a local filesystem,
+then import it into VM using `vmctl` in `prometheus` mode.
+1. Copy data from minio.
+    1. Run the `minio/mc` Docker container.
+    1. `mc config host add minio http://minio:9000 accessKey secretKey`, substituting appropriate values for the last 3 items.
+    1. `mc cp -r minio/prometheus thanos-data`
+1. Import using `vmctl`.
+    1. Follow the [instructions](#how-to-build) to compile `vmctl` on your machine.
+    1. Use [prometheus](#migrating-data-from-prometheus) mode to import data:
+    ```
+    vmctl prometheus --prom-snapshot thanos-data --vm-addr http://victoria-metrics:8428
+    ```
+
+## Migrating data from VictoriaMetrics
+
+### Native protocol
+
+The [native binary protocol](https://victoriametrics.github.io/#how-to-export-data-in-native-format)
+was introduced in [1.42.0 release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.42.0)
+and provides the most efficient way to migrate data between VM instances: single to single, cluster to cluster,
+single to cluster and vice versa. Please note that both instances (source and destination) should be of v1.42.0
+or higher.
+
+See `./vmctl vm-native --help` for details and the full list of flags.
+
+In this mode `vmctl` acts as a proxy between two VM instances, where time series filtering is done by the "source" (`src`)
+and processing is done by the "destination" (`dst`). Because of that, `vmctl` doesn't actually know how much data will be
+processed and can't show the progress bar. It will show the current processing speed and the total number of processed bytes:
+
+```
+./vmctl vm-native --vm-native-src-addr=http://localhost:8528 \
+  --vm-native-dst-addr=http://localhost:8428 \
+  --vm-native-filter-match='{job="vmagent"}' \
+  --vm-native-filter-time-start='2020-01-01T20:07:00Z'
+VictoriaMetrics Native import mode
+Initing export pipe from "http://localhost:8528" with filters:
+	filter: match[]={job="vmagent"}
+Initing import process to "http://localhost:8428":
+Total: 336.75 KiB ↖ Speed: 454.46 KiB p/s
+2020/10/13 17:04:59 Total time: 952.143376ms
+```
+
+Importing tips:
+1. Migrating all the metrics from one VM to another may collide with existing application metrics
+(prefixed with `vm_`) at the destination and lead to confusion when using
+[official Grafana dashboards](https://grafana.com/orgs/victoriametrics/dashboards).
+To avoid such a situation, try to filter out VM process metrics via the `--vm-native-filter-match` flag.
+2. 
Migration is a backfilling process, so it is recommended to read
+the [Backfilling tips](https://github.com/VictoriaMetrics/VictoriaMetrics#backfilling) section.
+3. `vmctl` doesn't provide relabeling or other types of label management in this mode.
+Instead, use [relabeling in VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl/issues/4#issuecomment-683424375).
+
+
+## Tuning
+
+### Influx mode
+
+The flag `--influx-concurrency` controls how many concurrent requests may be sent to InfluxDB while fetching
+timeseries. Please set it wisely to avoid overwhelming InfluxDB.
+
+The flag `--influx-chunk-size` controls the max amount of datapoints to return in a single chunk from fetch requests.
+Please see more details [here](https://docs.influxdata.com/influxdb/v1.7/guides/querying_data/#chunking).
+The chunk size is used to control InfluxDB memory usage, so it won't OOM on processing large timeseries with
+billions of datapoints.
+
+### Prometheus mode
+
+The flag `--prom-concurrency` controls how many concurrent readers will be reading the blocks in the snapshot.
+Since snapshots are just files on disk, it would be hard to overwhelm the system. Please go with a value equal
+to the number of free CPU cores.
+
+### VictoriaMetrics importer
+
+The flag `--vm-concurrency` controls the number of concurrent workers that process the input from InfluxDB query results.
+Please note that each import request can load up to a single vCPU core on VictoriaMetrics. So try to set it according
+to the allocated CPU resources of your VictoriaMetrics installation.
+
+The flag `--vm-batch-size` controls the max amount of samples collected before sending the import request.
+For example, if `--influx-chunk-size=500` and `--vm-batch-size=2000`, then the importer will process no more
+than 4 chunks before sending the request.
+
+### Importer stats
+
+After a successful import, `vmctl` prints some statistics.
+The important numbers to watch are the following:
+- `idle duration` - shows the time the importer spent while waiting for data from InfluxDB/Prometheus
+to fill up the `--vm-batch-size` batch size. The value shows the total duration across all workers configured
+via `--vm-concurrency`. A high value may be a sign of too slow InfluxDB/Prometheus fetches or of a too
+high `--vm-concurrency` value. Try to improve it by increasing the `--influx-concurrency` or `--prom-concurrency` value or by
+decreasing the `--vm-concurrency` value.
+- `import requests` - shows how many import requests were issued to the VM server.
+An import request is issued once the batch size (`--vm-batch-size`) is full and ready to be sent.
+Please prefer big batch sizes (50k-500k) to improve performance.
+- `import requests retries` - shows the number of unsuccessful import requests. A non-zero value may be
+a sign of network issues or of VM being overloaded. See the logs during import for error messages.
+
+### Silent mode
+
+By default `vmctl` waits for confirmation from the user before starting the import. If this is unwanted
+behavior and no user interaction is required, pass the `-s` flag to enable "silence" mode:
+```
+   -s Whether to run in silent mode. If set to true no confirmation prompts will appear. (default: false)
+```
+
+### Significant figures
+
+`vmctl` allows limiting the number of [significant figures](https://en.wikipedia.org/wiki/Significant_figures)
+before importing. For example, the average value for response size is `102.342305` bytes and it has 9 significant figures. 
+If you ask a human to pronounce this value, then with high probability the value will be rounded to the first 4 or 5 figures,
+because the rest aren't really that important to mention. In most cases, such a high precision is too much.
+Moreover, such values may be just a result of [floating point arithmetic](https://en.wikipedia.org/wiki/Floating-point_arithmetic),
+create a [false precision](https://en.wikipedia.org/wiki/False_precision) and result in a bad compression ratio
+according to [information theory](https://en.wikipedia.org/wiki/Information_theory).
+
+`vmctl` provides the following flags for improving data compression:
+
+* `--vm-round-digits` flag for rounding processed values to the given number of decimal digits after the point.
+  For example, `--vm-round-digits=2` would round `1.2345` to `1.23`. By default the rounding is disabled.
+
+* `--vm-significant-figures` flag for limiting the number of significant figures in processed values. It takes no effect if set
+  to 0 (the default), but with `--vm-significant-figures=5` the value `102.342305` will be rounded to `102.34`.
+
+The most common case for using these flags is to improve data compression for time series storing aggregation
+results such as `average`, `rate`, etc.
+
+### Adding extra labels
+
+ `vmctl` allows adding extra labels to all imported series. It can be achieved with the flag `--vm-extra-label label=value`.
+ If multiple labels need to be added, set the flag for each label, for example, `--vm-extra-label label1=value1 --vm-extra-label label2=value2`.
+ If a timeseries already has a label that is also set via the `--vm-extra-label` flag, the flag has priority and will override the label value from the timeseries.
+
diff --git a/app/vmctl/deployment/Dockerfile b/app/vmctl/deployment/Dockerfile
new file mode 100644
index 000000000..27b442b01
--- /dev/null
+++ b/app/vmctl/deployment/Dockerfile
@@ -0,0 +1,6 @@
+ARG base_image
+FROM $base_image
+
+ENTRYPOINT ["/vmctl-prod"]
+ARG src_binary
+COPY $src_binary ./vmctl-prod
diff --git a/app/vmctl/flags.go b/app/vmctl/flags.go
new file mode 100644
index 000000000..947135506
--- /dev/null
+++ b/app/vmctl/flags.go
@@ -0,0 +1,292 @@
+package main
+
+import (
+	"fmt"
+
+	"github.com/urfave/cli/v2"
+)
+
+const (
+	globalSilent = "s"
+)
+
+var (
+	globalFlags = []cli.Flag{
+		&cli.BoolFlag{
+			Name:  globalSilent,
+			Value: false,
+			Usage: "Whether to run in silent mode. If set to true no confirmation prompts will appear.",
+		},
+	}
+)
+
+const (
+	vmAddr               = "vm-addr"
+	vmUser               = "vm-user"
+	vmPassword           = "vm-password"
+	vmAccountID          = "vm-account-id"
+	vmConcurrency        = "vm-concurrency"
+	vmCompress           = "vm-compress"
+	vmBatchSize          = "vm-batch-size"
+	vmSignificantFigures = "vm-significant-figures"
+	vmRoundDigits        = "vm-round-digits"
+	vmExtraLabel         = "vm-extra-label"
+)
+
+var (
+	vmFlags = []cli.Flag{
+		&cli.StringFlag{
+			Name:  vmAddr,
+			Value: "http://localhost:8428",
+			Usage: "VictoriaMetrics address to perform import requests. \n" +
+				"Should be the same as --httpListenAddr value for single-node version or VMInsert component. 
\n" + + "Please note, that `vmctl` performs initial readiness check for the given address by checking `/health` endpoint.", + }, + &cli.StringFlag{ + Name: vmUser, + Usage: "VictoriaMetrics username for basic auth", + EnvVars: []string{"VM_USERNAME"}, + }, + &cli.StringFlag{ + Name: vmPassword, + Usage: "VictoriaMetrics password for basic auth", + EnvVars: []string{"VM_PASSWORD"}, + }, + &cli.StringFlag{ + Name: vmAccountID, + Usage: "AccountID is an arbitrary 32-bit integer identifying namespace for data ingestion (aka tenant). \n" + + "It is possible to set it as accountID:projectID, where projectID is also arbitrary 32-bit integer. \n" + + "If projectID isn't set, then it equals to 0", + }, + &cli.UintFlag{ + Name: vmConcurrency, + Usage: "Number of workers concurrently performing import requests to VM", + Value: 2, + }, + &cli.BoolFlag{ + Name: vmCompress, + Value: true, + Usage: "Whether to apply gzip compression to import requests", + }, + &cli.IntFlag{ + Name: vmBatchSize, + Value: 200e3, + Usage: "How many samples importer collects before sending the import request to VM", + }, + &cli.IntFlag{ + Name: vmSignificantFigures, + Value: 0, + Usage: "The number of significant figures to leave in metric values before importing. " + + "See https://en.wikipedia.org/wiki/Significant_figures. Zero value saves all the significant figures. " + + "This option may be used for increasing on-disk compression level for the stored metrics. " + + "See also --vm-round-digits option", + }, + &cli.IntFlag{ + Name: vmRoundDigits, + Value: 100, + Usage: "Round metric values to the given number of decimal digits after the point. " + + "This option may be used for increasing on-disk compression level for the stored metrics", + }, + &cli.StringSliceFlag{ + Name: vmExtraLabel, + Value: nil, + Usage: "Extra labels, that will be added to imported timeseries. In case of collision, label value defined by flag" + + "will have priority. Flag can be set multiple times, to add few additional labels.", + }, + } +) + +const ( + influxAddr = "influx-addr" + influxUser = "influx-user" + influxPassword = "influx-password" + influxDB = "influx-database" + influxRetention = "influx-retention-policy" + influxChunkSize = "influx-chunk-size" + influxConcurrency = "influx-concurrency" + influxFilterSeries = "influx-filter-series" + influxFilterTimeStart = "influx-filter-time-start" + influxFilterTimeEnd = "influx-filter-time-end" + influxMeasurementFieldSeparator = "influx-measurement-field-separator" +) + +var ( + influxFlags = []cli.Flag{ + &cli.StringFlag{ + Name: influxAddr, + Value: "http://localhost:8086", + Usage: "Influx server addr", + }, + &cli.StringFlag{ + Name: influxUser, + Usage: "Influx user", + EnvVars: []string{"INFLUX_USERNAME"}, + }, + &cli.StringFlag{ + Name: influxPassword, + Usage: "Influx user password", + EnvVars: []string{"INFLUX_PASSWORD"}, + }, + &cli.StringFlag{ + Name: influxDB, + Usage: "Influx database", + Required: true, + }, + &cli.StringFlag{ + Name: influxRetention, + Usage: "Influx retention policy", + Value: "autogen", + }, + &cli.IntFlag{ + Name: influxChunkSize, + Usage: "The chunkSize defines max amount of series to be returned in one chunk", + Value: 10e3, + }, + &cli.IntFlag{ + Name: influxConcurrency, + Usage: "Number of concurrently running fetch queries to InfluxDB", + Value: 1, + }, + &cli.StringFlag{ + Name: influxFilterSeries, + Usage: "Influx filter expression to select series. E.g. 
\"from cpu where arch='x86' AND hostname='host_2753'\".\n" + + "See for details https://docs.influxdata.com/influxdb/v1.7/query_language/schema_exploration#show-series", + }, + &cli.StringFlag{ + Name: influxFilterTimeStart, + Usage: "The time filter to select timeseries with timestamp equal or higher than provided value. E.g. '2020-01-01T20:07:00Z'", + }, + &cli.StringFlag{ + Name: influxFilterTimeEnd, + Usage: "The time filter to select timeseries with timestamp equal or lower than provided value. E.g. '2020-01-01T20:07:00Z'", + }, + &cli.StringFlag{ + Name: influxMeasurementFieldSeparator, + Usage: "The {separator} symbol used to concatenate {measurement} and {field} names into series name {measurement}{separator}{field}.", + Value: "_", + }, + } +) + +const ( + promSnapshot = "prom-snapshot" + promConcurrency = "prom-concurrency" + promFilterTimeStart = "prom-filter-time-start" + promFilterTimeEnd = "prom-filter-time-end" + promFilterLabel = "prom-filter-label" + promFilterLabelValue = "prom-filter-label-value" +) + +var ( + promFlags = []cli.Flag{ + &cli.StringFlag{ + Name: promSnapshot, + Usage: "Path to Prometheus snapshot. Pls see for details https://www.robustperception.io/taking-snapshots-of-prometheus-data", + Required: true, + }, + &cli.IntFlag{ + Name: promConcurrency, + Usage: "Number of concurrently running snapshot readers", + Value: 1, + }, + &cli.StringFlag{ + Name: promFilterTimeStart, + Usage: "The time filter in RFC3339 format to select timeseries with timestamp equal or higher than provided value. E.g. '2020-01-01T20:07:00Z'", + }, + &cli.StringFlag{ + Name: promFilterTimeEnd, + Usage: "The time filter in RFC3339 format to select timeseries with timestamp equal or lower than provided value. E.g. '2020-01-01T20:07:00Z'", + }, + &cli.StringFlag{ + Name: promFilterLabel, + Usage: "Prometheus label name to filter timeseries by. E.g. '__name__' will filter timeseries by name.", + }, + &cli.StringFlag{ + Name: promFilterLabelValue, + Usage: fmt.Sprintf("Prometheus regular expression to filter label from %q flag.", promFilterLabel), + Value: ".*", + }, + } +) + +const ( + vmNativeFilterMatch = "vm-native-filter-match" + vmNativeFilterTimeStart = "vm-native-filter-time-start" + vmNativeFilterTimeEnd = "vm-native-filter-time-end" + + vmNativeSrcAddr = "vm-native-src-addr" + vmNativeSrcUser = "vm-native-src-user" + vmNativeSrcPassword = "vm-native-src-password" + + vmNativeDstAddr = "vm-native-dst-addr" + vmNativeDstUser = "vm-native-dst-user" + vmNativeDstPassword = "vm-native-dst-password" +) + +var ( + vmNativeFlags = []cli.Flag{ + &cli.StringFlag{ + Name: vmNativeFilterMatch, + Usage: "Time series selector to match series for export. For example, select {instance!=\"localhost\"} will " + + "match all series with \"instance\" label different to \"localhost\".\n" + + " See more details here https://github.com/VictoriaMetrics/VictoriaMetrics#how-to-export-data-in-native-format", + Value: `{__name__!=""}`, + }, + &cli.StringFlag{ + Name: vmNativeFilterTimeStart, + Usage: "The time filter may contain either unix timestamp in seconds or RFC3339 values. E.g. '2020-01-01T20:07:00Z'", + }, + &cli.StringFlag{ + Name: vmNativeFilterTimeEnd, + Usage: "The time filter may contain either unix timestamp in seconds or RFC3339 values. E.g. '2020-01-01T20:07:00Z'", + }, + &cli.StringFlag{ + Name: vmNativeSrcAddr, + Usage: "VictoriaMetrics address to perform export from. \n" + + " Should be the same as --httpListenAddr value for single-node version or VMSelect component." 
+ + " If exporting from cluster version - include the tenet token in address.", + Required: true, + }, + &cli.StringFlag{ + Name: vmNativeSrcUser, + Usage: "VictoriaMetrics username for basic auth", + EnvVars: []string{"VM_NATIVE_SRC_USERNAME"}, + }, + &cli.StringFlag{ + Name: vmNativeSrcPassword, + Usage: "VictoriaMetrics password for basic auth", + EnvVars: []string{"VM_NATIVE_SRC_PASSWORD"}, + }, + &cli.StringFlag{ + Name: vmNativeDstAddr, + Usage: "VictoriaMetrics address to perform import to. \n" + + " Should be the same as --httpListenAddr value for single-node version or VMInsert component." + + " If importing into cluster version - include the tenet token in address.", + Required: true, + }, + &cli.StringFlag{ + Name: vmNativeDstUser, + Usage: "VictoriaMetrics username for basic auth", + EnvVars: []string{"VM_NATIVE_DST_USERNAME"}, + }, + &cli.StringFlag{ + Name: vmNativeDstPassword, + Usage: "VictoriaMetrics password for basic auth", + EnvVars: []string{"VM_NATIVE_DST_PASSWORD"}, + }, + &cli.StringSliceFlag{ + Name: vmExtraLabel, + Value: nil, + Usage: "Extra labels, that will be added to imported timeseries. In case of collision, label value defined by flag" + + "will have priority. Flag can be set multiple times, to add few additional labels.", + }, + } +) + +func mergeFlags(flags ...[]cli.Flag) []cli.Flag { + var result []cli.Flag + for _, f := range flags { + result = append(result, f...) + } + return result +} diff --git a/app/vmctl/influx.go b/app/vmctl/influx.go new file mode 100644 index 000000000..ececc59c6 --- /dev/null +++ b/app/vmctl/influx.go @@ -0,0 +1,146 @@ +package main + +import ( + "fmt" + "io" + "log" + "sync" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm" + "github.com/cheggaaa/pb/v3" +) + +type influxProcessor struct { + ic *influx.Client + im *vm.Importer + cc int + separator string +} + +func newInfluxProcessor(ic *influx.Client, im *vm.Importer, cc int, separator string) *influxProcessor { + if cc < 1 { + cc = 1 + } + return &influxProcessor{ + ic: ic, + im: im, + cc: cc, + separator: separator, + } +} + +func (ip *influxProcessor) run(silent bool) error { + series, err := ip.ic.Explore() + if err != nil { + return fmt.Errorf("explore query failed: %s", err) + } + if len(series) < 1 { + return fmt.Errorf("found no timeseries to import") + } + + question := fmt.Sprintf("Found %d timeseries to import. 
Continue?", len(series)) + if !silent && !prompt(question) { + return nil + } + + bar := pb.StartNew(len(series)) + seriesCh := make(chan *influx.Series) + errCh := make(chan error) + ip.im.ResetStats() + + var wg sync.WaitGroup + wg.Add(ip.cc) + for i := 0; i < ip.cc; i++ { + go func() { + defer wg.Done() + for s := range seriesCh { + if err := ip.do(s); err != nil { + errCh <- fmt.Errorf("request failed for %q.%q: %s", s.Measurement, s.Field, err) + return + } + bar.Increment() + } + }() + } + + // any error breaks the import + for _, s := range series { + select { + case infErr := <-errCh: + return fmt.Errorf("influx error: %s", infErr) + case vmErr := <-ip.im.Errors(): + return fmt.Errorf("Import process failed: \n%s", wrapErr(vmErr)) + case seriesCh <- s: + } + } + + close(seriesCh) + wg.Wait() + ip.im.Close() + // drain import errors channel + for vmErr := range ip.im.Errors() { + return fmt.Errorf("Import process failed: \n%s", wrapErr(vmErr)) + } + bar.Finish() + log.Println("Import finished!") + log.Print(ip.im.Stats()) + return nil +} + +const dbLabel = "db" + +func (ip *influxProcessor) do(s *influx.Series) error { + cr, err := ip.ic.FetchDataPoints(s) + if err != nil { + return fmt.Errorf("failed to fetch datapoints: %s", err) + } + defer func() { + _ = cr.Close() + }() + var name string + if s.Measurement != "" { + name = fmt.Sprintf("%s%s%s", s.Measurement, ip.separator, s.Field) + } else { + name = s.Field + } + + labels := make([]vm.LabelPair, len(s.LabelPairs)) + var containsDBLabel bool + for i, lp := range s.LabelPairs { + if lp.Name == dbLabel { + containsDBLabel = true + break + } + labels[i] = vm.LabelPair{ + Name: lp.Name, + Value: lp.Value, + } + } + if !containsDBLabel { + labels = append(labels, vm.LabelPair{ + Name: dbLabel, + Value: ip.ic.Database(), + }) + } + + for { + time, values, err := cr.Next() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + // skip empty results + if len(time) < 1 { + continue + } + ip.im.Input() <- &vm.TimeSeries{ + Name: name, + LabelPairs: labels, + Timestamps: time, + Values: values, + } + } +} diff --git a/app/vmctl/influx/influx.go b/app/vmctl/influx/influx.go new file mode 100644 index 000000000..79ca5f249 --- /dev/null +++ b/app/vmctl/influx/influx.go @@ -0,0 +1,362 @@ +package influx + +import ( + "fmt" + "io" + "log" + "strings" + "time" + + influx "github.com/influxdata/influxdb/client/v2" +) + +// Client represents a wrapper over +// influx HTTP client +type Client struct { + influx.Client + + database string + retention string + chunkSize int + + filterSeries string + filterTime string +} + +// Config contains fields required +// for Client configuration +type Config struct { + Addr string + Username string + Password string + Database string + Retention string + ChunkSize int + + Filter Filter +} + +// Filter contains configuration for filtering +// the timeseries +type Filter struct { + Series string + TimeStart string + TimeEnd string +} + +// Series holds the time series +type Series struct { + Measurement string + Field string + LabelPairs []LabelPair +} + +var valueEscaper = strings.NewReplacer(`\`, `\\`, `'`, `\'`) + +func (s Series) fetchQuery(timeFilter string) string { + f := &strings.Builder{} + fmt.Fprintf(f, "select %q from %q", s.Field, s.Measurement) + if len(s.LabelPairs) > 0 || len(timeFilter) > 0 { + f.WriteString(" where") + } + for i, pair := range s.LabelPairs { + pairV := valueEscaper.Replace(pair.Value) + fmt.Fprintf(f, " %q='%s'", pair.Name, pairV) + if i != 
len(s.LabelPairs)-1 {
+			f.WriteString(" and")
+		}
+	}
+	if len(timeFilter) > 0 {
+		if len(s.LabelPairs) > 0 {
+			f.WriteString(" and")
+		}
+		fmt.Fprintf(f, " %s", timeFilter)
+	}
+	return f.String()
+}
+
+// LabelPair is the key-value record
+// of time series label
+type LabelPair struct {
+	Name  string
+	Value string
+}
+
+// NewClient creates and returns influx client
+// configured with passed Config
+func NewClient(cfg Config) (*Client, error) {
+	c := influx.HTTPConfig{
+		Addr:               cfg.Addr,
+		Username:           cfg.Username,
+		Password:           cfg.Password,
+		InsecureSkipVerify: true,
+	}
+	hc, err := influx.NewHTTPClient(c)
+	if err != nil {
+		return nil, fmt.Errorf("failed to establish conn: %s", err)
+	}
+	if _, _, err := hc.Ping(time.Second); err != nil {
+		return nil, fmt.Errorf("ping failed: %s", err)
+	}
+
+	chunkSize := cfg.ChunkSize
+	if chunkSize < 1 {
+		chunkSize = 10e3
+	}
+
+	client := &Client{
+		Client:       hc,
+		database:     cfg.Database,
+		retention:    cfg.Retention,
+		chunkSize:    chunkSize,
+		filterTime:   timeFilter(cfg.Filter.TimeStart, cfg.Filter.TimeEnd),
+		filterSeries: cfg.Filter.Series,
+	}
+	return client, nil
+}
+
+// Database returns database name
+func (c Client) Database() string {
+	return c.database
+}
+
+func timeFilter(start, end string) string {
+	if start == "" && end == "" {
+		return ""
+	}
+	var tf string
+	if start != "" {
+		tf = fmt.Sprintf("time >= '%s'", start)
+	}
+	if end != "" {
+		if tf != "" {
+			tf += " and "
+		}
+		tf += fmt.Sprintf("time <= '%s'", end)
+	}
+	return tf
+}
+
+// Explore checks the existing data schema in influx
+// by checking available fields and series,
+// whose unique combinations represent all possible
+// time series existing in the database.
+// The exploration is required to reduce the load on influx:
+// it allows querying the exact field of the exact time series,
+// instead of fetching all of the values over and over.
+//
+// The result may contain non-existing time series.
+func (c *Client) Explore() ([]*Series, error) {
+	log.Printf("Exploring scheme for database %q", c.database)
+	mFields, err := c.fieldsByMeasurement()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get field keys: %s", err)
+	}
+
+	series, err := c.getSeries()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get series: %s", err)
+	}
+
+	var iSeries []*Series
+	for _, s := range series {
+		fields, ok := mFields[s.Measurement]
+		if !ok {
+			return nil, fmt.Errorf("can't find field keys for measurement %q", s.Measurement)
+		}
+		for _, field := range fields {
+			is := &Series{
+				Measurement: s.Measurement,
+				Field:       field,
+				LabelPairs:  s.LabelPairs,
+			}
+			iSeries = append(iSeries, is)
+		}
+	}
+	return iSeries, nil
+}
+
+// ChunkedResponse is a wrapper over influx.ChunkedResponse.
+// Used for better memory usage control while iterating
+// over huge time series.
+type ChunkedResponse struct {
+	cr    *influx.ChunkedResponse
+	iq    influx.Query
+	field string
+}
+
+// Close closes cr.
+func (cr *ChunkedResponse) Close() error {
+	return cr.cr.Close()
+}
+
+// Next reads the next part/chunk of time series.
+// Returns io.EOF when time series was read entirely. 
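+// A (nil, nil, nil) return means the current chunk contained no rows.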
+func (cr *ChunkedResponse) Next() ([]int64, []float64, error) {
+	resp, err := cr.cr.NextResponse()
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp.Error() != nil {
+		return nil, nil, fmt.Errorf("response error for %s: %s", cr.iq.Command, resp.Error())
+	}
+	if len(resp.Results) != 1 {
+		return nil, nil, fmt.Errorf("unexpected number of results in response: %d", len(resp.Results))
+	}
+	results, err := parseResult(resp.Results[0])
+	if err != nil {
+		return nil, nil, err
+	}
+	if len(results) < 1 {
+		return nil, nil, nil
+	}
+	r := results[0]
+
+	const key = "time"
+	timestamps, ok := r.values[key]
+	if !ok {
+		return nil, nil, fmt.Errorf("response doesn't contain field %q", key)
+	}
+
+	fieldValues, ok := r.values[cr.field]
+	if !ok {
+		return nil, nil, fmt.Errorf("response doesn't contain field %q", cr.field)
+	}
+	values := make([]float64, len(fieldValues))
+	for i, fv := range fieldValues {
+		v, err := toFloat64(fv)
+		if err != nil {
+			return nil, nil, fmt.Errorf("failed to convert value %q.%v to float64: %s",
+				cr.field, fv, err)
+		}
+		values[i] = v
+	}
+
+	ts := make([]int64, len(results[0].values[key]))
+	for i, v := range timestamps {
+		t, err := parseDate(v.(string))
+		if err != nil {
+			return nil, nil, err
+		}
+		ts[i] = t
+	}
+	return ts, values, nil
+}
+
+// FetchDataPoints performs SELECT request to fetch
+// datapoints for particular field.
+func (c *Client) FetchDataPoints(s *Series) (*ChunkedResponse, error) {
+	iq := influx.Query{
+		Command:         s.fetchQuery(c.filterTime),
+		Database:        c.database,
+		RetentionPolicy: c.retention,
+		Chunked:         true,
+		ChunkSize:       c.chunkSize,
+	}
+	cr, err := c.QueryAsChunk(iq)
+	if err != nil {
+		return nil, fmt.Errorf("query %q err: %s", iq.Command, err)
+	}
+	return &ChunkedResponse{cr, iq, s.Field}, nil
+}
+
+func (c *Client) fieldsByMeasurement() (map[string][]string, error) {
+	q := influx.Query{
+		Command:         "show field keys",
+		Database:        c.database,
+		RetentionPolicy: c.retention,
+	}
+	log.Printf("fetching fields: %s", stringify(q))
+	qValues, err := c.do(q)
+	if err != nil {
+		return nil, fmt.Errorf("error while executing query %q: %s", q.Command, err)
+	}
+
+	var total int
+	var skipped int
+	const fKey = "fieldKey"
+	const fType = "fieldType"
+	result := make(map[string][]string, len(qValues))
+	for _, qv := range qValues {
+		types := qv.values[fType]
+		fields := qv.values[fKey]
+		values := make([]string, 0)
+		for key, field := range fields {
+			if types[key].(string) == "string" {
+				skipped++
+				continue
+			}
+			values = append(values, field.(string))
+			total++
+		}
+		result[qv.name] = values
+	}
+
+	if skipped > 0 {
+		log.Printf("found %d fields; skipped %d non-numeric fields", total, skipped)
+	} else {
+		log.Printf("found %d fields", total)
+	}
+	return result, nil
+}
+
+func (c *Client) getSeries() ([]*Series, error) {
+	com := "show series"
+	if c.filterSeries != "" {
+		com = fmt.Sprintf("%s %s", com, c.filterSeries)
+	}
+	q := influx.Query{
+		Command:         com,
+		Database:        c.database,
+		RetentionPolicy: c.retention,
+		Chunked:         true,
+		ChunkSize:       c.chunkSize,
+	}
+
+	log.Printf("fetching series: %s", stringify(q))
+	cr, err := c.QueryAsChunk(q)
+	if err != nil {
+		return nil, fmt.Errorf("error while executing query %q: %s", q.Command, err)
+	}
+
+	const key = "key"
+	var result []*Series
+	for {
+		resp, err := cr.NextResponse()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return nil, err
+		}
+		if resp.Error() != nil {
+			return nil, fmt.Errorf("response error for query %q: %s", q.Command, resp.Error())
+		}
+		qValues, err := 
parseResult(resp.Results[0]) + if err != nil { + return nil, err + } + for _, qv := range qValues { + for _, v := range qv.values[key] { + s := &Series{} + if err := s.unmarshal(v.(string)); err != nil { + return nil, err + } + result = append(result, s) + } + } + } + log.Printf("found %d series", len(result)) + return result, nil +} + +func (c *Client) do(q influx.Query) ([]queryValues, error) { + res, err := c.Query(q) + if err != nil { + return nil, fmt.Errorf("query %q err: %s", q.Command, err) + } + if len(res.Results) < 1 { + return nil, fmt.Errorf("exploration query %q returned 0 results", q.Command) + } + return parseResult(res.Results[0]) +} diff --git a/app/vmctl/influx/influx_test.go b/app/vmctl/influx/influx_test.go new file mode 100644 index 000000000..c92b0f5e0 --- /dev/null +++ b/app/vmctl/influx/influx_test.go @@ -0,0 +1,127 @@ +package influx + +import "testing" + +func TestFetchQuery(t *testing.T) { + testCases := []struct { + s Series + timeFilter string + expected string + }{ + { + s: Series{ + Measurement: "cpu", + Field: "value", + LabelPairs: []LabelPair{ + { + Name: "foo", + Value: "bar", + }, + }, + }, + expected: `select "value" from "cpu" where "foo"='bar'`, + }, + { + s: Series{ + Measurement: "cpu", + Field: "value", + LabelPairs: []LabelPair{ + { + Name: "foo", + Value: "bar", + }, + { + Name: "baz", + Value: "qux", + }, + }, + }, + expected: `select "value" from "cpu" where "foo"='bar' and "baz"='qux'`, + }, + { + s: Series{ + Measurement: "cpu", + Field: "value", + LabelPairs: []LabelPair{ + { + Name: "foo", + Value: "b'ar", + }, + }, + }, + timeFilter: "time >= now()", + expected: `select "value" from "cpu" where "foo"='b\'ar' and time >= now()`, + }, + { + s: Series{ + Measurement: "cpu", + Field: "value", + LabelPairs: []LabelPair{ + { + Name: "name", + Value: `dev-mapper-centos\x2dswap.swap`, + }, + { + Name: "state", + Value: "dev-mapp'er-c'en'tos", + }, + }, + }, + timeFilter: "time >= now()", + expected: `select "value" from "cpu" where "name"='dev-mapper-centos\\x2dswap.swap' and "state"='dev-mapp\'er-c\'en\'tos' and time >= now()`, + }, + { + s: Series{ + Measurement: "cpu", + Field: "value", + }, + timeFilter: "time >= now()", + expected: `select "value" from "cpu" where time >= now()`, + }, + { + s: Series{ + Measurement: "cpu", + Field: "value", + }, + expected: `select "value" from "cpu"`, + }, + } + + for _, tc := range testCases { + query := tc.s.fetchQuery(tc.timeFilter) + if query != tc.expected { + t.Fatalf("got: \n%s;\nexpected: \n%s", query, tc.expected) + } + } +} + +func TestTimeFilter(t *testing.T) { + testCases := []struct { + start string + end string + expected string + }{ + { + start: "2020-01-01T20:07:00Z", + end: "2020-01-01T21:07:00Z", + expected: "time >= '2020-01-01T20:07:00Z' and time <= '2020-01-01T21:07:00Z'", + }, + { + expected: "", + }, + { + start: "2020-01-01T20:07:00Z", + expected: "time >= '2020-01-01T20:07:00Z'", + }, + { + end: "2020-01-01T21:07:00Z", + expected: "time <= '2020-01-01T21:07:00Z'", + }, + } + for _, tc := range testCases { + f := timeFilter(tc.start, tc.end) + if f != tc.expected { + t.Fatalf("got: \n%q;\nexpected: \n%q", f, tc.expected) + } + } +} diff --git a/app/vmctl/influx/parser.go b/app/vmctl/influx/parser.go new file mode 100644 index 000000000..e8b425f42 --- /dev/null +++ b/app/vmctl/influx/parser.go @@ -0,0 +1,191 @@ +package influx + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + influx "github.com/influxdata/influxdb/client/v2" +) + +type queryValues struct { + 
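+	// name is the result (measurement) name the values were parsed from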
name string
+	values map[string][]interface{}
+}
+
+func parseResult(r influx.Result) ([]queryValues, error) {
+	if len(r.Err) > 0 {
+		return nil, fmt.Errorf("result error: %s", r.Err)
+	}
+	qValues := make([]queryValues, len(r.Series))
+	for i, row := range r.Series {
+		values := make(map[string][]interface{}, len(row.Values))
+		for _, value := range row.Values {
+			for idx, v := range value {
+				key := row.Columns[idx]
+				values[key] = append(values[key], v)
+			}
+		}
+		qValues[i] = queryValues{
+			name:   row.Name,
+			values: values,
+		}
+	}
+	return qValues, nil
+}
+
+func toFloat64(v interface{}) (float64, error) {
+	switch i := v.(type) {
+	case json.Number:
+		return i.Float64()
+	case float64:
+		return i, nil
+	case float32:
+		return float64(i), nil
+	case int64:
+		return float64(i), nil
+	case int32:
+		return float64(i), nil
+	case int:
+		return float64(i), nil
+	case uint64:
+		return float64(i), nil
+	case uint32:
+		return float64(i), nil
+	case uint:
+		return float64(i), nil
+	case string:
+		return strconv.ParseFloat(i, 64)
+	default:
+		return 0, fmt.Errorf("unexpected value type %v", i)
+	}
+}
+
+func parseDate(dateStr string) (int64, error) {
+	startTime, err := time.Parse(time.RFC3339, dateStr)
+	if err != nil {
+		return 0, fmt.Errorf("cannot parse %q: %s", dateStr, err)
+	}
+	return startTime.UnixNano() / 1e6, nil
+}
+
+func stringify(q influx.Query) string {
+	return fmt.Sprintf("command: %q; database: %q; retention: %q",
+		q.Command, q.Database, q.RetentionPolicy)
+}
+
+func (s *Series) unmarshal(v string) error {
+	noEscapeChars := strings.IndexByte(v, '\\') < 0
+	n := nextUnescapedChar(v, ',', noEscapeChars)
+	if n < 0 {
+		s.Measurement = unescapeTagValue(v, noEscapeChars)
+		return nil
+	}
+	s.Measurement = unescapeTagValue(v[:n], noEscapeChars)
+	var err error
+	s.LabelPairs, err = unmarshalTags(v[n+1:], noEscapeChars)
+	if err != nil {
+		return fmt.Errorf("failed to unmarshal tags: %s", err)
+	}
+	return nil
+}
+
+func unmarshalTags(s string, noEscapeChars bool) ([]LabelPair, error) {
+	var result []LabelPair
+	for {
+		lp := LabelPair{}
+		n := nextUnescapedChar(s, ',', noEscapeChars)
+		if n < 0 {
+			if err := lp.unmarshal(s, noEscapeChars); err != nil {
+				return nil, err
+			}
+			if len(lp.Name) == 0 || len(lp.Value) == 0 {
+				return nil, nil
+			}
+			result = append(result, lp)
+			return result, nil
+		}
+		if err := lp.unmarshal(s[:n], noEscapeChars); err != nil {
+			return nil, err
+		}
+		s = s[n+1:]
+		if len(lp.Name) == 0 || len(lp.Value) == 0 {
+			continue
+		}
+		result = append(result, lp)
+	}
+}
+
+func (lp *LabelPair) unmarshal(s string, noEscapeChars bool) error {
+	n := nextUnescapedChar(s, '=', noEscapeChars)
+	if n < 0 {
+		return fmt.Errorf("missing tag value for %q", s)
+	}
+	lp.Name = unescapeTagValue(s[:n], noEscapeChars)
+	lp.Value = unescapeTagValue(s[n+1:], noEscapeChars)
+	return nil
+}
+
+func unescapeTagValue(s string, noEscapeChars bool) string {
+	if noEscapeChars {
+		// Fast path - no escape chars.
+		return s
+	}
+	n := strings.IndexByte(s, '\\')
+	if n < 0 {
+		return s
+	}
+
+	// Slow path. Remove escape chars.
+	dst := make([]byte, 0, len(s))
+	for {
+		dst = append(dst, s[:n]...)
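+		// drop the backslash; it is re-added below if the following char is not an escapable one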
+ s = s[n+1:] + if len(s) == 0 { + return string(append(dst, '\\')) + } + ch := s[0] + if ch != ' ' && ch != ',' && ch != '=' && ch != '\\' { + dst = append(dst, '\\') + } + dst = append(dst, ch) + s = s[1:] + n = strings.IndexByte(s, '\\') + if n < 0 { + return string(append(dst, s...)) + } + } +} + +func nextUnescapedChar(s string, ch byte, noEscapeChars bool) int { + if noEscapeChars { + // Fast path: just search for ch in s, since s has no escape chars. + return strings.IndexByte(s, ch) + } + + sOrig := s +again: + n := strings.IndexByte(s, ch) + if n < 0 { + return -1 + } + if n == 0 { + return len(sOrig) - len(s) + n + } + if s[n-1] != '\\' { + return len(sOrig) - len(s) + n + } + nOrig := n + slashes := 0 + for n > 0 && s[n-1] == '\\' { + slashes++ + n-- + } + if slashes&1 == 0 { + return len(sOrig) - len(s) + nOrig + } + s = s[nOrig+1:] + goto again +} diff --git a/app/vmctl/influx/parser_test.go b/app/vmctl/influx/parser_test.go new file mode 100644 index 000000000..15ce9adaa --- /dev/null +++ b/app/vmctl/influx/parser_test.go @@ -0,0 +1,60 @@ +package influx + +import ( + "reflect" + "testing" +) + +func TestSeries_Unmarshal(t *testing.T) { + tag := func(name, value string) LabelPair { + return LabelPair{ + Name: name, + Value: value, + } + } + series := func(measurement string, lp ...LabelPair) Series { + return Series{ + Measurement: measurement, + LabelPairs: lp, + } + } + testCases := []struct { + got string + want Series + }{ + { + got: "cpu", + want: series("cpu"), + }, + { + got: "cpu,host=localhost", + want: series("cpu", tag("host", "localhost")), + }, + { + got: "cpu,host=localhost,instance=instance", + want: series("cpu", tag("host", "localhost"), tag("instance", "instance")), + }, + { + got: `fo\,bar\=baz,x\=\b=\\a\,\=\q\ `, + want: series("fo,bar=baz", tag(`x=\b`, `\a,=\q `)), + }, + { + got: "cpu,host=192.168.0.1,instance=fe80::fdc8:5e36:c2c6:baac%utun1", + want: series("cpu", tag("host", "192.168.0.1"), tag("instance", "fe80::fdc8:5e36:c2c6:baac%utun1")), + }, + { + got: `cpu,db=db1,host=localhost,server=host\=localhost\ user\=user\ `, + want: series("cpu", tag("db", "db1"), + tag("host", "localhost"), tag("server", "host=localhost user=user ")), + }, + } + for _, tc := range testCases { + s := Series{} + if err := s.unmarshal(tc.got); err != nil { + t.Fatalf("%q: unmarshal err: %s", tc.got, err) + } + if !reflect.DeepEqual(s, tc.want) { + t.Fatalf("%q: expected\n%#v\nto be equal\n%#v", tc.got, s, tc.want) + } + } +} diff --git a/app/vmctl/main.go b/app/vmctl/main.go new file mode 100644 index 000000000..d3268f353 --- /dev/null +++ b/app/vmctl/main.go @@ -0,0 +1,159 @@ +package main + +import ( + "fmt" + "log" + "os" + "os/signal" + "strings" + "syscall" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo" + "github.com/urfave/cli/v2" +) + +func main() { + start := time.Now() + app := &cli.App{ + Name: "vmctl", + Usage: "Victoria metrics command-line tool", + Version: buildinfo.Version, + Commands: []*cli.Command{ + { + Name: "influx", + Usage: "Migrate timeseries from InfluxDB", + Flags: mergeFlags(globalFlags, influxFlags, vmFlags), + Action: func(c *cli.Context) error { + fmt.Println("InfluxDB import mode") + + iCfg := influx.Config{ + Addr: c.String(influxAddr), + Username: c.String(influxUser), + Password: c.String(influxPassword), + Database: 
c.String(influxDB), + Retention: c.String(influxRetention), + Filter: influx.Filter{ + Series: c.String(influxFilterSeries), + TimeStart: c.String(influxFilterTimeStart), + TimeEnd: c.String(influxFilterTimeEnd), + }, + ChunkSize: c.Int(influxChunkSize), + } + influxClient, err := influx.NewClient(iCfg) + if err != nil { + return fmt.Errorf("failed to create influx client: %s", err) + } + + vmCfg := initConfigVM(c) + importer, err := vm.NewImporter(vmCfg) + if err != nil { + return fmt.Errorf("failed to create VM importer: %s", err) + } + + processor := newInfluxProcessor(influxClient, importer, + c.Int(influxConcurrency), c.String(influxMeasurementFieldSeparator)) + return processor.run(c.Bool(globalSilent)) + }, + }, + { + Name: "prometheus", + Usage: "Migrate timeseries from Prometheus", + Flags: mergeFlags(globalFlags, promFlags, vmFlags), + Action: func(c *cli.Context) error { + fmt.Println("Prometheus import mode") + + vmCfg := initConfigVM(c) + importer, err := vm.NewImporter(vmCfg) + if err != nil { + return fmt.Errorf("failed to create VM importer: %s", err) + } + + promCfg := prometheus.Config{ + Snapshot: c.String(promSnapshot), + Filter: prometheus.Filter{ + TimeMin: c.String(promFilterTimeStart), + TimeMax: c.String(promFilterTimeEnd), + Label: c.String(promFilterLabel), + LabelValue: c.String(promFilterLabelValue), + }, + } + cl, err := prometheus.NewClient(promCfg) + if err != nil { + return fmt.Errorf("failed to create prometheus client: %s", err) + } + pp := prometheusProcessor{ + cl: cl, + im: importer, + cc: c.Int(promConcurrency), + } + return pp.run(c.Bool(globalSilent)) + }, + }, + { + Name: "vm-native", + Usage: "Migrate time series between VictoriaMetrics installations via native binary format", + Flags: vmNativeFlags, + Action: func(c *cli.Context) error { + fmt.Println("VictoriaMetrics Native import mode") + + if c.String(vmNativeFilterMatch) == "" { + return fmt.Errorf("flag %q can't be empty", vmNativeFilterMatch) + } + + p := vmNativeProcessor{ + filter: filter{ + match: c.String(vmNativeFilterMatch), + timeStart: c.String(vmNativeFilterTimeStart), + timeEnd: c.String(vmNativeFilterTimeEnd), + }, + src: &vmNativeClient{ + addr: strings.Trim(c.String(vmNativeSrcAddr), "/"), + user: c.String(vmNativeSrcUser), + password: c.String(vmNativeSrcPassword), + }, + dst: &vmNativeClient{ + addr: strings.Trim(c.String(vmNativeDstAddr), "/"), + user: c.String(vmNativeDstUser), + password: c.String(vmNativeDstPassword), + extraLabels: c.StringSlice(vmExtraLabel), + }, + } + return p.run() + }, + }, + }, + } + + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-c + fmt.Println("\r- Execution cancelled") + os.Exit(0) + }() + + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } + log.Printf("Total time: %v", time.Since(start)) +} + +func initConfigVM(c *cli.Context) vm.Config { + return vm.Config{ + Addr: c.String(vmAddr), + User: c.String(vmUser), + Password: c.String(vmPassword), + Concurrency: uint8(c.Int(vmConcurrency)), + Compress: c.Bool(vmCompress), + AccountID: c.String(vmAccountID), + BatchSize: c.Int(vmBatchSize), + SignificantFigures: c.Int(vmSignificantFigures), + RoundDigits: c.Int(vmRoundDigits), + ExtraLabels: c.StringSlice(vmExtraLabel), + } +} diff --git a/app/vmctl/prometheus.go b/app/vmctl/prometheus.go new file mode 100644 index 000000000..cb0c02b82 --- /dev/null +++ b/app/vmctl/prometheus.go @@ -0,0 +1,131 @@ +package main + +import ( + "fmt" + "log" + "sync" + + 
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm" + "github.com/cheggaaa/pb/v3" + "github.com/prometheus/prometheus/tsdb" +) + +type prometheusProcessor struct { + // prometheus client fetches and reads + // snapshot blocks + cl *prometheus.Client + // importer performs import requests + // for timeseries data returned from + // snapshot blocks + im *vm.Importer + // cc stands for concurrency + // and defines number of concurrently + // running snapshot block readers + cc int +} + +func (pp *prometheusProcessor) run(silent bool) error { + blocks, err := pp.cl.Explore() + if err != nil { + return fmt.Errorf("explore failed: %s", err) + } + if len(blocks) < 1 { + return fmt.Errorf("found no blocks to import") + } + question := fmt.Sprintf("Found %d blocks to import. Continue?", len(blocks)) + if !silent && !prompt(question) { + return nil + } + + bar := pb.StartNew(len(blocks)) + blockReadersCh := make(chan tsdb.BlockReader) + errCh := make(chan error, pp.cc) + pp.im.ResetStats() + + var wg sync.WaitGroup + wg.Add(pp.cc) + for i := 0; i < pp.cc; i++ { + go func() { + defer wg.Done() + for br := range blockReadersCh { + if err := pp.do(br); err != nil { + errCh <- fmt.Errorf("read failed for block %q: %s", br.Meta().ULID, err) + return + } + bar.Increment() + } + }() + } + + // any error breaks the import + for _, br := range blocks { + select { + case promErr := <-errCh: + close(blockReadersCh) + return fmt.Errorf("prometheus error: %s", promErr) + case vmErr := <-pp.im.Errors(): + close(blockReadersCh) + return fmt.Errorf("Import process failed: \n%s", wrapErr(vmErr)) + case blockReadersCh <- br: + } + } + + close(blockReadersCh) + wg.Wait() + // wait for all buffers to flush + pp.im.Close() + // drain import errors channel + for vmErr := range pp.im.Errors() { + return fmt.Errorf("Import process failed: \n%s", wrapErr(vmErr)) + } + bar.Finish() + log.Println("Import finished!") + log.Print(pp.im.Stats()) + return nil +} + +func (pp *prometheusProcessor) do(b tsdb.BlockReader) error { + ss, err := pp.cl.Read(b) + if err != nil { + return fmt.Errorf("failed to read block: %s", err) + } + for ss.Next() { + var name string + var labels []vm.LabelPair + series := ss.At() + + for _, label := range series.Labels() { + if label.Name == "__name__" { + name = label.Value + continue + } + labels = append(labels, vm.LabelPair{ + Name: label.Name, + Value: label.Value, + }) + } + if name == "" { + return fmt.Errorf("failed to find `__name__` label in labelset for block %v", b.Meta().ULID) + } + + var timestamps []int64 + var values []float64 + it := series.Iterator() + for it.Next() { + t, v := it.At() + timestamps = append(timestamps, t) + values = append(values, v) + } + if err := it.Err(); err != nil { + return err + } + pp.im.Input() <- &vm.TimeSeries{ + Name: name, + LabelPairs: labels, + Timestamps: timestamps, + Values: values, + } + } + return ss.Err() +} diff --git a/app/vmctl/prometheus/prometheus.go b/app/vmctl/prometheus/prometheus.go new file mode 100644 index 000000000..39b9fe709 --- /dev/null +++ b/app/vmctl/prometheus/prometheus.go @@ -0,0 +1,147 @@ +package prometheus + +import ( + "fmt" + "time" + + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb" +) + +// Config contains a list of params needed +// for reading Prometheus snapshots +type Config struct { + // Path to snapshot directory + Snapshot string + + Filter Filter +} + 
+// Filter contains configuration for filtering
+// the timeseries
+type Filter struct {
+	TimeMin    string
+	TimeMax    string
+	Label      string
+	LabelValue string
+}
+
+// Client is a wrapper over Prometheus tsdb.DBReadOnly
+type Client struct {
+	*tsdb.DBReadOnly
+	filter filter
+}
+
+type filter struct {
+	min, max   int64
+	label      string
+	labelValue string
+}
+
+func (f filter) inRange(min, max int64) bool {
+	fmin, fmax := f.min, f.max
+	if fmin == 0 {
+		fmin = min
+	}
+	if fmax == 0 {
+		fmax = max
+	}
+	return min <= fmax && fmin <= max
+}
+
+// NewClient creates and validates new Client
+// with given Config
+func NewClient(cfg Config) (*Client, error) {
+	db, err := tsdb.OpenDBReadOnly(cfg.Snapshot, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open snapshot %q: %s", cfg.Snapshot, err)
+	}
+	c := &Client{DBReadOnly: db}
+	min, max, err := parseTime(cfg.Filter.TimeMin, cfg.Filter.TimeMax)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse time in filter: %s", err)
+	}
+	c.filter = filter{
+		min:        min,
+		max:        max,
+		label:      cfg.Filter.Label,
+		labelValue: cfg.Filter.LabelValue,
+	}
+	return c, nil
+}
+
+// Explore fetches all available blocks from a snapshot
+// and collects the Meta() data from each block.
+// Explore does initial filtering by time-range
+// for snapshot blocks but does not take into account
+// label filters.
+func (c *Client) Explore() ([]tsdb.BlockReader, error) {
+	blocks, err := c.Blocks()
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch blocks: %s", err)
+	}
+	s := &Stats{
+		Filtered: c.filter.min != 0 || c.filter.max != 0 || c.filter.label != "",
+		Blocks:   len(blocks),
+	}
+	var blocksToImport []tsdb.BlockReader
+	for _, block := range blocks {
+		meta := block.Meta()
+		if !c.filter.inRange(meta.MinTime, meta.MaxTime) {
+			s.SkippedBlocks++
+			continue
+		}
+		if s.MinTime == 0 || meta.MinTime < s.MinTime {
+			s.MinTime = meta.MinTime
+		}
+		if s.MaxTime == 0 || meta.MaxTime > s.MaxTime {
+			s.MaxTime = meta.MaxTime
+		}
+		s.Samples += meta.Stats.NumSamples
+		s.Series += meta.Stats.NumSeries
+		blocksToImport = append(blocksToImport, block)
+	}
+	fmt.Println(s)
+	return blocksToImport, nil
+}
+
+// Read reads the given BlockReader according to configured
+// time and label filters.
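+// Zero filter bounds fall back to the block's own min/max timestamps.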
+func (c *Client) Read(block tsdb.BlockReader) (storage.SeriesSet, error) { + minTime, maxTime := block.Meta().MinTime, block.Meta().MaxTime + if c.filter.min != 0 { + minTime = c.filter.min + } + if c.filter.max != 0 { + maxTime = c.filter.max + } + q, err := tsdb.NewBlockQuerier(block, minTime, maxTime) + if err != nil { + return nil, err + } + ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, c.filter.label, c.filter.labelValue)) + return ss, nil +} + +func parseTime(start, end string) (int64, int64, error) { + var s, e int64 + if start == "" && end == "" { + return 0, 0, nil + } + if start != "" { + v, err := time.Parse(time.RFC3339, start) + if err != nil { + return 0, 0, fmt.Errorf("failed to parse %q: %s", start, err) + } + s = v.UnixNano() / int64(time.Millisecond) + } + if end != "" { + v, err := time.Parse(time.RFC3339, end) + if err != nil { + return 0, 0, fmt.Errorf("failed to parse %q: %s", end, err) + } + e = v.UnixNano() / int64(time.Millisecond) + } + return s, e, nil +} diff --git a/app/vmctl/prometheus/prometheus_test.go b/app/vmctl/prometheus/prometheus_test.go new file mode 100644 index 000000000..6fd738f4e --- /dev/null +++ b/app/vmctl/prometheus/prometheus_test.go @@ -0,0 +1,34 @@ +package prometheus + +import ( + "testing" +) + +func TestInRange(t *testing.T) { + testCases := []struct { + filterMin, filterMax int64 + blockMin, blockMax int64 + expected bool + }{ + {0, 0, 1, 2, true}, + {0, 3, 1, 2, true}, + {0, 3, 4, 5, false}, + {3, 0, 1, 2, false}, + {3, 0, 2, 4, true}, + {3, 10, 1, 2, false}, + {3, 10, 1, 4, true}, + {3, 10, 5, 9, true}, + {3, 10, 9, 12, true}, + {3, 10, 12, 15, false}, + } + for _, tc := range testCases { + f := filter{ + min: tc.filterMin, + max: tc.filterMax, + } + got := f.inRange(tc.blockMin, tc.blockMax) + if got != tc.expected { + t.Fatalf("got %v; expected %v: %v", got, tc.expected, tc) + } + } +} diff --git a/app/vmctl/prometheus/stats.go b/app/vmctl/prometheus/stats.go new file mode 100644 index 000000000..e6dc907d5 --- /dev/null +++ b/app/vmctl/prometheus/stats.go @@ -0,0 +1,38 @@ +package prometheus + +import ( + "fmt" + "time" +) + +// Stats represents data migration stats. +type Stats struct { + Filtered bool + MinTime int64 + MaxTime int64 + Samples uint64 + Series uint64 + Blocks int + SkippedBlocks int +} + +// String returns string representation for s. +func (s Stats) String() string { + str := fmt.Sprintf("Prometheus snapshot stats:\n"+ + " blocks found: %d;\n"+ + " blocks skipped by time filter: %d;\n"+ + " min time: %d (%v);\n"+ + " max time: %d (%v);\n"+ + " samples: %d;\n"+ + " series: %d.", + s.Blocks, s.SkippedBlocks, + s.MinTime, time.Unix(s.MinTime/1e3, 0).Format(time.RFC3339), + s.MaxTime, time.Unix(s.MaxTime/1e3, 0).Format(time.RFC3339), + s.Samples, s.Series) + + if s.Filtered { + str += "\n* Stats numbers are based on blocks meta info and don't account for applied filters." 
+ } + + return str +} diff --git a/app/vmctl/utils.go b/app/vmctl/utils.go new file mode 100644 index 000000000..678e70635 --- /dev/null +++ b/app/vmctl/utils.go @@ -0,0 +1,33 @@ +package main + +import ( + "bufio" + "fmt" + "os" + "strings" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm" +) + +func prompt(question string) bool { + reader := bufio.NewReader(os.Stdin) + fmt.Print(question, " [Y/n] ") + answer, err := reader.ReadString('\n') + if err != nil { + panic(err) + } + answer = strings.TrimSpace(strings.ToLower(answer)) + if answer == "" || answer == "yes" || answer == "y" { + return true + } + return false +} + +func wrapErr(vmErr *vm.ImportError) error { + var errTS string + for _, ts := range vmErr.Batch { + errTS += fmt.Sprintf("%s for timestamps range %d - %d\n", + ts.String(), ts.Timestamps[0], ts.Timestamps[len(ts.Timestamps)-1]) + } + return fmt.Errorf("%s with error: %s", errTS, vmErr.Err) +} diff --git a/app/vmctl/vm/stats.go b/app/vmctl/vm/stats.go new file mode 100644 index 000000000..dc76905e4 --- /dev/null +++ b/app/vmctl/vm/stats.go @@ -0,0 +1,47 @@ +package vm + +import ( + "fmt" + "sync" + "time" +) + +type stats struct { + sync.Mutex + samples uint64 + bytes uint64 + requests uint64 + retries uint64 + startTime time.Time + idleDuration time.Duration +} + +func (s *stats) String() string { + s.Lock() + defer s.Unlock() + + totalImportDuration := time.Since(s.startTime) + totalImportDurationS := totalImportDuration.Seconds() + var samplesPerS float64 + if s.samples > 0 && totalImportDurationS > 0 { + samplesPerS = float64(s.samples) / totalImportDurationS + } + bytesPerS := byteCountSI(0) + if s.bytes > 0 && totalImportDurationS > 0 { + bytesPerS = byteCountSI(int64(float64(s.bytes) / totalImportDurationS)) + } + + return fmt.Sprintf("VictoriaMetrics importer stats:\n"+ + " idle duration: %v;\n"+ + " time spent while importing: %v;\n"+ + " total samples: %d;\n"+ + " samples/s: %.2f;\n"+ + " total bytes: %s;\n"+ + " bytes/s: %s;\n"+ + " import requests: %d;\n"+ + " import requests retries: %d;", + s.idleDuration, totalImportDuration, + s.samples, samplesPerS, + byteCountSI(int64(s.bytes)), bytesPerS, + s.requests, s.retries) +} diff --git a/app/vmctl/vm/timeseries.go b/app/vmctl/vm/timeseries.go new file mode 100644 index 000000000..7a54a5152 --- /dev/null +++ b/app/vmctl/vm/timeseries.go @@ -0,0 +1,82 @@ +package vm + +import ( + "fmt" + "io" +) + +// TimeSeries represents a time series. +type TimeSeries struct { + Name string + LabelPairs []LabelPair + Timestamps []int64 + Values []float64 +} + +// LabelPair represents a label +type LabelPair struct { + Name string + Value string +} + +// String returns user-readable ts. +func (ts TimeSeries) String() string { + s := ts.Name + if len(ts.LabelPairs) < 1 { + return s + } + var labels string + for i, lp := range ts.LabelPairs { + labels += fmt.Sprintf("%s=%q", lp.Name, lp.Value) + if i < len(ts.LabelPairs)-1 { + labels += "," + } + } + return fmt.Sprintf("%s{%s}", s, labels) +} + +// cWriter used to avoid error checking +// while doing Write calls. +// cWriter caches the first error if any +// and discards all sequential write calls +type cWriter struct { + w io.Writer + n int + err error +} + +func (cw *cWriter) printf(format string, args ...interface{}) { + if cw.err != nil { + return + } + n, err := fmt.Fprintf(cw.w, format, args...) 
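+	// Record the bytes written and cache the first error; once err is set, printf becomes a no-op.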
+	cw.n += n
+	cw.err = err
+}
+
+// write writes ts to w in JSON line format, e.g.:
+// {"metric":{"__name__":"cpu_usage_guest","arch":"x64","hostname":"host_19"},"timestamps":[1567296000000,1567296010000],"values":[1,66]}
+func (ts *TimeSeries) write(w io.Writer) (int, error) {
+	pointsCount := len(ts.Timestamps)
+	if pointsCount == 0 {
+		return 0, nil
+	}
+
+	cw := &cWriter{w: w}
+	cw.printf(`{"metric":{"__name__":%q`, ts.Name)
+	if len(ts.LabelPairs) > 0 {
+		for _, lp := range ts.LabelPairs {
+			cw.printf(",%q:%q", lp.Name, lp.Value)
+		}
+	}
+
+	cw.printf(`},"timestamps":[`)
+	for i := 0; i < pointsCount-1; i++ {
+		cw.printf(`%d,`, ts.Timestamps[i])
+	}
+	cw.printf(`%d],"values":[`, ts.Timestamps[pointsCount-1])
+	for i := 0; i < pointsCount-1; i++ {
+		cw.printf(`%v,`, ts.Values[i])
+	}
+	cw.printf("%v]}\n", ts.Values[pointsCount-1])
+	return cw.n, cw.err
+}
diff --git a/app/vmctl/vm/timeseries_test.go b/app/vmctl/vm/timeseries_test.go
new file mode 100644
index 000000000..f020df96f
--- /dev/null
+++ b/app/vmctl/vm/timeseries_test.go
@@ -0,0 +1,89 @@
+package vm
+
+import (
+	"bytes"
+	"math"
+	"strings"
+	"testing"
+)
+
+func TestTimeSeries_Write(t *testing.T) {
+	var testCases = []struct {
+		name string
+		ts   *TimeSeries
+		exp  string
+	}{
+		{
+			name: "one datapoint",
+			ts: &TimeSeries{
+				Name: "foo",
+				LabelPairs: []LabelPair{
+					{
+						Name:  "key",
+						Value: "val",
+					},
+				},
+				Timestamps: []int64{1577877162200},
+				Values:     []float64{1},
+			},
+			exp: `{"metric":{"__name__":"foo","key":"val"},"timestamps":[1577877162200],"values":[1]}`,
+		},
+		{
+			name: "multiple samples",
+			ts: &TimeSeries{
+				Name: "foo",
+				LabelPairs: []LabelPair{
+					{
+						Name:  "key",
+						Value: "val",
+					},
+				},
+				Timestamps: []int64{1577877162200, 15778771622400, 15778771622600},
+				Values:     []float64{1, 1.6263, 32.123},
+			},
+			exp: `{"metric":{"__name__":"foo","key":"val"},"timestamps":[1577877162200,15778771622400,15778771622600],"values":[1,1.6263,32.123]}`,
+		},
+		{
+			name: "no samples",
+			ts: &TimeSeries{
+				Name: "foo",
+				LabelPairs: []LabelPair{
+					{
+						Name:  "key",
+						Value: "val",
+					},
+				},
+			},
+			exp: ``,
+		},
+		{
+			name: "inf values",
+			ts: &TimeSeries{
+				Name: "foo",
+				LabelPairs: []LabelPair{
+					{
+						Name:  "key",
+						Value: "val",
+					},
+				},
+				Timestamps: []int64{1577877162200, 1577877162200, 1577877162200},
+				Values:     []float64{0, math.Inf(-1), math.Inf(1)},
+			},
+			exp: `{"metric":{"__name__":"foo","key":"val"},"timestamps":[1577877162200,1577877162200,1577877162200],"values":[0,-Inf,+Inf]}`,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			b := &bytes.Buffer{}
+			_, err := tc.ts.write(b)
+			if err != nil {
+				t.Error(err)
+			}
+			got := strings.TrimSpace(b.String())
+			if got != tc.exp {
+				t.Fatalf("\ngot: %q\nwant: %q", got, tc.exp)
+			}
+		})
+	}
+}
diff --git a/app/vmctl/vm/vm.go b/app/vmctl/vm/vm.go
new file mode 100644
index 000000000..0822e815f
--- /dev/null
+++ b/app/vmctl/vm/vm.go
@@ -0,0 +1,384 @@
+package vm
+
+import (
+	"bufio"
+	"compress/gzip"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
+)
+
+// Config contains a list of params to configure
+// the Importer
+type Config struct {
+	// VictoriaMetrics address to perform import requests
+	//   --httpListenAddr value for single node version
+	//   --httpListenAddr value of vminsert component for cluster version
+	Addr string
+	// Concurrency defines number of workers
+	// performing the import requests concurrently
+	Concurrency uint8
+	// Whether to apply gzip compression
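+	// on the import request body (Content-Encoding: gzip).
+	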
Compress bool + // AccountID for cluster version. + // Empty value assumes it is a single node version + AccountID string + // BatchSize defines how many samples + // importer collects before sending the import request + BatchSize int + // User name for basic auth + User string + // Password for basic auth + Password string + // SignificantFigures defines the number of significant figures to leave + // in metric values before importing. + // Zero value saves all the significant decimal places + SignificantFigures int + // RoundDigits defines the number of decimal digits after the point that must be left + // in metric values before importing. + RoundDigits int + // ExtraLabels that will be added to all imported series. Must be in label=value format. + ExtraLabels []string +} + +// Importer performs insertion of timeseries +// via VictoriaMetrics import protocol +// see https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master#how-to-import-time-series-data +type Importer struct { + addr string + importPath string + compress bool + user string + password string + + close chan struct{} + input chan *TimeSeries + errors chan *ImportError + + wg sync.WaitGroup + once sync.Once + + s *stats +} + +// ResetStats resets im stats. +func (im *Importer) ResetStats() { + im.s = &stats{ + startTime: time.Now(), + } +} + +// Stats returns im stats. +func (im *Importer) Stats() string { + return im.s.String() +} + +// AddExtraLabelsToImportPath - adds extra labels query params to given url path. +func AddExtraLabelsToImportPath(path string, extraLabels []string) (string, error) { + dst := path + separator := "?" + for _, extraLabel := range extraLabels { + if !strings.Contains(extraLabel, "=") { + return path, fmt.Errorf("bad format for extra_label flag, it must be `key=value`, got: %q", extraLabel) + } + if strings.Contains(dst, "?") { + separator = "&" + } + dst += fmt.Sprintf("%sextra_label=%s", separator, extraLabel) + } + return dst, nil +} + +// NewImporter creates new Importer for the given cfg. 
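+// NewImporter pings the configured address and starts cfg.Concurrency worker goroutines.
+//
+// A minimal usage sketch (illustrative only; the address is an assumption):
+//
+//	im, err := NewImporter(Config{Addr: "http://localhost:8428", Concurrency: 1})
+//	if err != nil {
+//		// handle the error
+//	}
+//	im.Input() <- &TimeSeries{Name: "foo", Timestamps: []int64{1577877162200}, Values: []float64{1}}
+//	im.Close()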
+func NewImporter(cfg Config) (*Importer, error) { + if cfg.Concurrency < 1 { + return nil, fmt.Errorf("concurrency can't be lower than 1") + } + + addr := strings.TrimRight(cfg.Addr, "/") + // if single version + // see https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master#how-to-import-time-series-data + importPath := addr + "/api/v1/import" + if cfg.AccountID != "" { + // if cluster version + // see https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster#url-format + importPath = fmt.Sprintf("%s/insert/%s/prometheus/api/v1/import", addr, cfg.AccountID) + } + importPath, err := AddExtraLabelsToImportPath(importPath, cfg.ExtraLabels) + if err != nil { + return nil, err + } + + im := &Importer{ + addr: addr, + importPath: importPath, + compress: cfg.Compress, + user: cfg.User, + password: cfg.Password, + close: make(chan struct{}), + input: make(chan *TimeSeries, cfg.Concurrency*4), + errors: make(chan *ImportError, cfg.Concurrency), + } + if err := im.Ping(); err != nil { + return nil, fmt.Errorf("ping to %q failed: %s", addr, err) + } + + if cfg.BatchSize < 1 { + cfg.BatchSize = 1e5 + } + + im.wg.Add(int(cfg.Concurrency)) + for i := 0; i < int(cfg.Concurrency); i++ { + go func() { + defer im.wg.Done() + im.startWorker(cfg.BatchSize, cfg.SignificantFigures, cfg.RoundDigits) + }() + } + im.ResetStats() + return im, nil +} + +// ImportError is type of error generated +// in case of unsuccessful import request +type ImportError struct { + // The batch of timeseries that failed + Batch []*TimeSeries + // The error that appeared during insert + Err error +} + +// Errors returns a channel for receiving +// import errors if any +func (im *Importer) Errors() chan *ImportError { return im.errors } + +// Input returns a channel for sending timeseries +// that need to be imported +func (im *Importer) Input() chan<- *TimeSeries { return im.input } + +// Close sends signal to all goroutines to exit +// and waits until they are finished +func (im *Importer) Close() { + im.once.Do(func() { + close(im.close) + im.wg.Wait() + close(im.errors) + }) +} + +func (im *Importer) startWorker(batchSize, significantFigures, roundDigits int) { + var batch []*TimeSeries + var dataPoints int + var waitForBatch time.Time + for { + select { + case <-im.close: + if err := im.Import(batch); err != nil { + im.errors <- &ImportError{ + Batch: batch, + Err: err, + } + } + return + case ts := <-im.input: + // init waitForBatch when first + // value was received + if waitForBatch.IsZero() { + waitForBatch = time.Now() + } + + if significantFigures > 0 { + for i, v := range ts.Values { + ts.Values[i] = decimal.RoundToSignificantFigures(v, significantFigures) + } + } + if roundDigits < 100 { + for i, v := range ts.Values { + ts.Values[i] = decimal.RoundToDecimalDigits(v, roundDigits) + } + } + + batch = append(batch, ts) + dataPoints += len(ts.Values) + if dataPoints < batchSize { + continue + } + im.s.Lock() + im.s.idleDuration += time.Since(waitForBatch) + im.s.Unlock() + + if err := im.flush(batch); err != nil { + im.errors <- &ImportError{ + Batch: batch, + Err: err, + } + // make a new batch, since old one was referenced as err + batch = make([]*TimeSeries, len(batch)) + } + batch = batch[:0] + dataPoints = 0 + waitForBatch = time.Now() + } + } +} + +const ( + // TODO: make configurable + backoffRetries = 5 + backoffFactor = 1.7 + backoffMinDuration = time.Second +) + +func (im *Importer) flush(b []*TimeSeries) error { + var err error + for i := 0; i < backoffRetries; i++ { + err = im.Import(b) + if err == 
nil {
+			return nil
+		}
+		if errors.Is(err, ErrBadRequest) {
+			return err // fail fast if not recoverable
+		}
+		im.s.Lock()
+		im.s.retries++
+		im.s.Unlock()
+		backoff := float64(backoffMinDuration) * math.Pow(backoffFactor, float64(i))
+		time.Sleep(time.Duration(backoff))
+	}
+	return fmt.Errorf("import failed with %d retries: %s", backoffRetries, err)
+}
+
+// Ping sends a ping to im.addr.
+func (im *Importer) Ping() error {
+	url := fmt.Sprintf("%s/health", im.addr)
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return fmt.Errorf("cannot create request to %q: %s", im.addr, err)
+	}
+	if im.user != "" {
+		req.SetBasicAuth(im.user, im.password)
+	}
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		// Close the body so the underlying connection can be reused.
+		_ = resp.Body.Close()
+	}()
+	if resp.StatusCode != http.StatusOK {
+		return fmt.Errorf("bad status code: %d", resp.StatusCode)
+	}
+	return nil
+}
+
+// Import imports tsBatch.
+func (im *Importer) Import(tsBatch []*TimeSeries) error {
+	if len(tsBatch) < 1 {
+		return nil
+	}
+
+	pr, pw := io.Pipe()
+	req, err := http.NewRequest("POST", im.importPath, pr)
+	if err != nil {
+		return fmt.Errorf("cannot create request to %q: %s", im.addr, err)
+	}
+	if im.user != "" {
+		req.SetBasicAuth(im.user, im.password)
+	}
+	if im.compress {
+		req.Header.Set("Content-Encoding", "gzip")
+	}
+
+	errCh := make(chan error)
+	go func() {
+		errCh <- do(req)
+		close(errCh)
+	}()
+
+	w := io.Writer(pw)
+	if im.compress {
+		zw, err := gzip.NewWriterLevel(pw, 1)
+		if err != nil {
+			return fmt.Errorf("unexpected error when creating gzip writer: %s", err)
+		}
+		w = zw
+	}
+	bw := bufio.NewWriterSize(w, 16*1024)
+
+	var totalSamples, totalBytes int
+	for _, ts := range tsBatch {
+		n, err := ts.write(bw)
+		if err != nil {
+			return fmt.Errorf("write err: %w", err)
+		}
+		totalBytes += n
+		totalSamples += len(ts.Values)
+	}
+	if err := bw.Flush(); err != nil {
+		return err
+	}
+	if im.compress {
+		err := w.(*gzip.Writer).Close()
+		if err != nil {
+			return err
+		}
+	}
+	if err := pw.Close(); err != nil {
+		return err
+	}
+
+	requestErr := <-errCh
+	if requestErr != nil {
+		return fmt.Errorf("import request error for %q: %w", im.addr, requestErr)
+	}
+
+	im.s.Lock()
+	im.s.bytes += uint64(totalBytes)
+	im.s.samples += uint64(totalSamples)
+	im.s.requests++
+	im.s.Unlock()
+
+	return nil
+}
+
+// ErrBadRequest represents bad request error.
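+// flush fails fast on errors wrapping ErrBadRequest instead of retrying them.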
+var ErrBadRequest = errors.New("bad request") + +func do(req *http.Request) error { + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("unexpected error when performing request: %s", err) + } + defer func() { + _ = resp.Body.Close() + }() + if resp.StatusCode != http.StatusNoContent { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("failed to read response body for status code %d: %s", resp.StatusCode, err) + } + if resp.StatusCode == http.StatusBadRequest { + return fmt.Errorf("%w: unexpected response code %d: %s", ErrBadRequest, resp.StatusCode, string(body)) + } + return fmt.Errorf("unexpected response code %d: %s", resp.StatusCode, string(body)) + } + return nil +} + +func byteCountSI(b int64) string { + const unit = 1000 + if b < unit { + return fmt.Sprintf("%d B", b) + } + div, exp := int64(unit), 0 + for n := b / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %cB", + float64(b)/float64(div), "kMGTPE"[exp]) +} diff --git a/app/vmctl/vm/vm_test.go b/app/vmctl/vm/vm_test.go new file mode 100644 index 000000000..1d9d42523 --- /dev/null +++ b/app/vmctl/vm/vm_test.go @@ -0,0 +1,69 @@ +package vm + +import "testing" + +func TestAddExtraLabelsToImportPath(t *testing.T) { + type args struct { + path string + extraLabels []string + } + tests := []struct { + name string + args args + want string + wantErr bool + }{ + { + name: "ok w/o extra labels", + args: args{ + path: "/api/v1/import", + }, + want: "/api/v1/import", + }, + { + name: "ok one extra label", + args: args{ + path: "/api/v1/import", + extraLabels: []string{"instance=host-1"}, + }, + want: "/api/v1/import?extra_label=instance=host-1", + }, + { + name: "ok two extra labels", + args: args{ + path: "/api/v1/import", + extraLabels: []string{"instance=host-2", "job=vmagent"}, + }, + want: "/api/v1/import?extra_label=instance=host-2&extra_label=job=vmagent", + }, + { + name: "ok two extra with exist param", + args: args{ + path: "/api/v1/import?timeout=50", + extraLabels: []string{"instance=host-2", "job=vmagent"}, + }, + want: "/api/v1/import?timeout=50&extra_label=instance=host-2&extra_label=job=vmagent", + }, + { + name: "bad incorrect format for extra label", + args: args{ + path: "/api/v1/import", + extraLabels: []string{"label=value", "bad_label_wo_value"}, + }, + want: "/api/v1/import", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := AddExtraLabelsToImportPath(tt.args.path, tt.args.extraLabels) + if (err != nil) != tt.wantErr { + t.Errorf("AddExtraLabelsToImportPath() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("AddExtraLabelsToImportPath() got = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/app/vmctl/vm_native.go b/app/vmctl/vm_native.go new file mode 100644 index 000000000..02a9ffd0d --- /dev/null +++ b/app/vmctl/vm_native.go @@ -0,0 +1,143 @@ +package main + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm" + "github.com/cheggaaa/pb/v3" +) + +type vmNativeProcessor struct { + filter filter + + dst *vmNativeClient + src *vmNativeClient +} + +type vmNativeClient struct { + addr string + user string + password string + extraLabels []string +} + +type filter struct { + match string + timeStart string + timeEnd string +} + +func (f filter) String() string { + s := fmt.Sprintf("\n\tfilter: match[]=%s", f.match) + if f.timeStart != "" { + s += 
fmt.Sprintf("\n\tstart: %s", f.timeStart) + } + if f.timeEnd != "" { + s += fmt.Sprintf("\n\tend: %s", f.timeEnd) + } + return s +} + +const ( + nativeExportAddr = "api/v1/export/native" + nativeImportAddr = "api/v1/import/native" + + barTpl = `Total: {{counters . }} {{ cycle . "↖" "↗" "↘" "↙" }} Speed: {{speed . }} {{string . "suffix"}}` +) + +func (p *vmNativeProcessor) run() error { + pr, pw := io.Pipe() + + fmt.Printf("Initing export pipe from %q with filters: %s\n", p.src.addr, p.filter) + exportReader, err := p.exportPipe() + if err != nil { + return fmt.Errorf("failed to init export pipe: %s", err) + } + + sync := make(chan struct{}) + nativeImportAddr, err := vm.AddExtraLabelsToImportPath(nativeImportAddr, p.dst.extraLabels) + if err != nil { + return err + } + + go func() { + defer func() { close(sync) }() + u := fmt.Sprintf("%s/%s", p.dst.addr, nativeImportAddr) + req, err := http.NewRequest("POST", u, pr) + if err != nil { + log.Fatalf("cannot create import request to %q: %s", p.dst.addr, err) + } + importResp, err := p.dst.do(req, http.StatusNoContent) + if err != nil { + log.Fatalf("import request failed: %s", err) + } + if err := importResp.Body.Close(); err != nil { + log.Fatalf("cannot close import response body: %s", err) + } + }() + + fmt.Printf("Initing import process to %q:\n", p.dst.addr) + bar := pb.ProgressBarTemplate(barTpl).Start64(0) + barReader := bar.NewProxyReader(exportReader) + + _, err = io.Copy(pw, barReader) + if err != nil { + return fmt.Errorf("failed to write into %q: %s", p.dst.addr, err) + } + if err := pw.Close(); err != nil { + return err + } + <-sync + + bar.Finish() + return nil +} + +func (p *vmNativeProcessor) exportPipe() (io.ReadCloser, error) { + u := fmt.Sprintf("%s/%s", p.src.addr, nativeExportAddr) + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, fmt.Errorf("cannot create request to %q: %s", p.src.addr, err) + } + + params := req.URL.Query() + params.Set("match[]", p.filter.match) + if p.filter.timeStart != "" { + params.Set("start", p.filter.timeStart) + } + if p.filter.timeEnd != "" { + params.Set("end", p.filter.timeEnd) + } + req.URL.RawQuery = params.Encode() + + // disable compression since it is meaningless for native format + req.Header.Set("Accept-Encoding", "identity") + resp, err := p.src.do(req, http.StatusOK) + if err != nil { + return nil, fmt.Errorf("export request failed: %s", err) + } + return resp.Body, nil +} + +func (c *vmNativeClient) do(req *http.Request, expSC int) (*http.Response, error) { + if c.user != "" { + req.SetBasicAuth(c.user, c.password) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("unexpected error when performing request: %s", err) + } + + if resp.StatusCode != expSC { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read response body for status code %d: %s", resp.StatusCode, err) + } + return nil, fmt.Errorf("unexpected response code %d: %s", resp.StatusCode, string(body)) + } + return resp, err +} diff --git a/app/vmrestore/README.md b/app/vmrestore/README.md index 6750ed14f..b18177f39 100644 --- a/app/vmrestore/README.md +++ b/app/vmrestore/README.md @@ -7,7 +7,7 @@ Restore process can be interrupted at any time. It is automatically resumed from when restarting `vmrestore` with the same args. -### Usage +## Usage VictoriaMetrics must be stopped during the restore process. @@ -25,13 +25,13 @@ The original `-storageDataPath` directory may contain old files. They will be su i.e. 
the end result would be similar to [rsync --delete](https://askubuntu.com/questions/476041/how-do-i-make-rsync-delete-files-that-have-been-deleted-from-the-source-folder). -### Troubleshooting +## Troubleshooting * If `vmrestore` eats all the network bandwidth, then set `-maxBytesPerSecond` to the desired value. * If `vmrestore` has been interrupted due to temporary error, then just restart it with the same args. It will resume the restore process. -### Advanced usage +## Advanced usage * Obtaining credentials from a file. @@ -118,24 +118,24 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q ``` -### How to build from sources +## How to build from sources It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - see `vmutils-*` archives there. -#### Development build +### Development build 1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13. 2. Run `make vmrestore` from the root folder of the repository. It builds `vmrestore` binary and puts it into the `bin` folder. -#### Production build +### Production build 1. [Install docker](https://docs.docker.com/install/). 2. Run `make vmrestore-prod` from the root folder of the repository. It builds `vmrestore-prod` binary and puts it into the `bin` folder. -#### Building docker images +### Building docker images Run `make package-vmrestore`. It builds `victoriametrics/vmrestore:` docker image locally. `` is auto-generated image tag, which depends on source code in the repository. diff --git a/app/vmselect/graphiteql/lexer.go b/app/vmselect/graphiteql/lexer.go new file mode 100644 index 000000000..3a519706c --- /dev/null +++ b/app/vmselect/graphiteql/lexer.go @@ -0,0 +1,417 @@ +package graphiteql + +import ( + "fmt" + "strings" +) + +type lexer struct { + // Token contains the currently parsed token. + // An empty token means EOF. 
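+	// Call Next to advance Token through the input.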
+ Token string + + sOrig string + sTail string + + err error +} + +func (lex *lexer) Context() string { + return fmt.Sprintf("%s%s", lex.Token, lex.sTail) +} + +func (lex *lexer) Init(s string) { + lex.Token = "" + + lex.sOrig = s + lex.sTail = s + + lex.err = nil +} + +func (lex *lexer) Next() error { + if lex.err != nil { + return lex.err + } + token, err := lex.next() + if err != nil { + lex.err = err + return err + } + lex.Token = token + return nil +} + +func (lex *lexer) next() (string, error) { + // Skip whitespace + s := lex.sTail + i := 0 + for i < len(s) && isSpaceChar(s[i]) { + i++ + } + s = s[i:] + lex.sTail = s + + if len(s) == 0 { + return "", nil + } + + var token string + var err error + switch s[0] { + case '(', ')', ',', '|', '=', '+', '-': + token = s[:1] + goto tokenFoundLabel + } + if isStringPrefix(s) { + token, err = scanString(s) + if err != nil { + return "", err + } + goto tokenFoundLabel + } + if isPositiveNumberPrefix(s) { + token, err = scanPositiveNumber(s) + if err != nil { + return "", err + } + goto tokenFoundLabel + } + token, err = scanIdent(s) + if err != nil { + return "", err + } + +tokenFoundLabel: + lex.sTail = s[len(token):] + return token, nil +} + +func scanString(s string) (string, error) { + if len(s) < 2 { + return "", fmt.Errorf("cannot find end of string in %q", s) + } + + quote := s[0] + i := 1 + for { + n := strings.IndexByte(s[i:], quote) + if n < 0 { + return "", fmt.Errorf("cannot find closing quote %c for the string %q", quote, s) + } + i += n + bs := 0 + for bs < i && s[i-bs-1] == '\\' { + bs++ + } + if bs%2 == 0 { + token := s[:i+1] + return token, nil + } + i++ + } +} + +func scanPositiveNumber(s string) (string, error) { + // Scan integer part. It may be empty if fractional part exists. + i := 0 + skipChars, isHex := scanSpecialIntegerPrefix(s) + i += skipChars + if isHex { + // Scan integer hex number + for i < len(s) && isHexChar(s[i]) { + i++ + } + if i == skipChars { + return "", fmt.Errorf("number cannot be empty") + } + return s[:i], nil + } + for i < len(s) && isDecimalChar(s[i]) { + i++ + } + + if i == len(s) { + if i == skipChars { + return "", fmt.Errorf("number cannot be empty") + } + return s, nil + } + if s[i] != '.' && s[i] != 'e' && s[i] != 'E' { + return s[:i], nil + } + + if s[i] == '.' { + // Scan fractional part. It cannot be empty. + i++ + j := i + for j < len(s) && isDecimalChar(s[j]) { + j++ + } + if j == i { + return "", fmt.Errorf("missing fractional part in %q", s) + } + i = j + if i == len(s) { + return s, nil + } + } + + if s[i] != 'e' && s[i] != 'E' { + return s[:i], nil + } + i++ + + // Scan exponent part. 
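+	// It must contain at least one decimal digit after the optional sign.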
+ if i == len(s) { + return "", fmt.Errorf("missing exponent part in %q", s) + } + if s[i] == '-' || s[i] == '+' { + i++ + } + j := i + for j < len(s) && isDecimalChar(s[j]) { + j++ + } + if j == i { + return "", fmt.Errorf("missing exponent part in %q", s) + } + return s[:j], nil +} + +func scanIdent(s string) (string, error) { + i := 0 + for i < len(s) { + switch s[i] { + case '\\': + // Skip the next char, since it is escaped + i += 2 + if i > len(s) { + return "", fmt.Errorf("missing escaped char in the end of %q", s) + } + case '[': + n := strings.IndexByte(s[i+1:], ']') + if n < 0 { + return "", fmt.Errorf("missing `]` char in %q", s) + } + i += n + 2 + case '{': + n := strings.IndexByte(s[i+1:], '}') + if n < 0 { + return "", fmt.Errorf("missing '}' char in %q", s) + } + i += n + 2 + case '*', '.': + i++ + default: + if !isIdentChar(s[i]) { + goto end + } + i++ + } + } +end: + if i == 0 { + return "", fmt.Errorf("cannot find a single ident char in %q", s) + } + return s[:i], nil +} + +func unescapeIdent(s string) string { + n := strings.IndexByte(s, '\\') + if n < 0 { + return s + } + dst := make([]byte, 0, len(s)) + for { + dst = append(dst, s[:n]...) + s = s[n+1:] + if len(s) == 0 { + return string(dst) + } + if s[0] == 'x' && len(s) >= 3 { + h1 := fromHex(s[1]) + h2 := fromHex(s[2]) + if h1 >= 0 && h2 >= 0 { + dst = append(dst, byte((h1<<4)|h2)) + s = s[3:] + } else { + dst = append(dst, s[0]) + s = s[1:] + } + } else { + dst = append(dst, s[0]) + s = s[1:] + } + n = strings.IndexByte(s, '\\') + if n < 0 { + dst = append(dst, s...) + return string(dst) + } + } +} + +func fromHex(ch byte) int { + if ch >= '0' && ch <= '9' { + return int(ch - '0') + } + if ch >= 'a' && ch <= 'f' { + return int((ch - 'a') + 10) + } + if ch >= 'A' && ch <= 'F' { + return int((ch - 'A') + 10) + } + return -1 +} + +func toHex(n byte) byte { + if n < 10 { + return '0' + n + } + return 'a' + (n - 10) +} + +func isMetricExprChar(ch byte) bool { + switch ch { + case '.', '*', '[', ']', '{', '}', ',': + return true + } + return false +} + +func appendEscapedIdent(dst []byte, s string) []byte { + for i := 0; i < len(s); i++ { + ch := s[i] + if isIdentChar(ch) || isMetricExprChar(ch) { + if i == 0 && !isFirstIdentChar(ch) { + // hex-encode the first char + dst = append(dst, '\\', 'x', toHex(ch>>4), toHex(ch&0xf)) + } else { + dst = append(dst, ch) + } + } else if ch >= 0x20 && ch < 0x7f { + // Leave ASCII printable chars as is + dst = append(dst, '\\', ch) + } else { + // hex-encode non-printable chars + dst = append(dst, '\\', 'x', toHex(ch>>4), toHex(ch&0xf)) + } + } + return dst +} + +func isEOF(s string) bool { + return len(s) == 0 +} + +func isBool(s string) bool { + s = strings.ToLower(s) + return s == "true" || s == "false" +} + +func isStringPrefix(s string) bool { + if len(s) == 0 { + return false + } + switch s[0] { + case '"', '\'': + return true + default: + return false + } +} + +func isPositiveNumberPrefix(s string) bool { + if len(s) == 0 { + return false + } + if isDecimalChar(s[0]) { + return true + } + + // Check for .234 numbers + if s[0] != '.' 
|| len(s) < 2 {
+		return false
+	}
+	return isDecimalChar(s[1])
+}
+
+func isSpecialIntegerPrefix(s string) bool {
+	skipChars, _ := scanSpecialIntegerPrefix(s)
+	return skipChars > 0
+}
+
+func scanSpecialIntegerPrefix(s string) (skipChars int, isHex bool) {
+	if len(s) < 1 || s[0] != '0' {
+		return 0, false
+	}
+	s = strings.ToLower(s[1:])
+	if len(s) == 0 {
+		return 0, false
+	}
+	if isDecimalChar(s[0]) {
+		// octal number: 0123
+		return 1, false
+	}
+	if s[0] == 'x' {
+		// 0x prefix
+		return 2, true
+	}
+	if s[0] == 'o' || s[0] == 'b' {
+		// 0o or 0b prefix
+		return 2, false
+	}
+	return 0, false
+}
+
+func isDecimalChar(ch byte) bool {
+	return ch >= '0' && ch <= '9'
+}
+
+func isHexChar(ch byte) bool {
+	return isDecimalChar(ch) || ch >= 'a' && ch <= 'f' || ch >= 'A' && ch <= 'F'
+}
+
+func isIdentPrefix(s string) bool {
+	if len(s) == 0 {
+		return false
+	}
+	if s[0] == '\\' {
+		// Assume this is an escape char for the next char.
+		return true
+	}
+	return isFirstIdentChar(s[0])
+}
+
+func isFirstIdentChar(ch byte) bool {
+	if !isIdentChar(ch) {
+		return false
+	}
+	if isDecimalChar(ch) {
+		return false
+	}
+	return true
+}
+
+func isIdentChar(ch byte) bool {
+	if ch >= 'a' && ch <= 'z' || ch >= 'A' && ch <= 'Z' {
+		return true
+	}
+	if isDecimalChar(ch) {
+		return true
+	}
+	switch ch {
+	case '-', '_', '$', ':', '*', '{', '[':
+		return true
+	}
+	return false
+}
+
+func isSpaceChar(ch byte) bool {
+	switch ch {
+	case ' ', '\t', '\n', '\v', '\f', '\r':
+		return true
+	default:
+		return false
+	}
+}
diff --git a/app/vmselect/graphiteql/lexer_test.go b/app/vmselect/graphiteql/lexer_test.go
new file mode 100644
index 000000000..bb070bf3a
--- /dev/null
+++ b/app/vmselect/graphiteql/lexer_test.go
@@ -0,0 +1,151 @@
+package graphiteql
+
+import (
+	"reflect"
+	"strings"
+	"testing"
+)
+
+func TestScanStringSuccess(t *testing.T) {
+	f := func(s, sExpected string) {
+		t.Helper()
+		result, err := scanString(s)
+		if err != nil {
+			t.Fatalf("unexpected error in scanString(%s): %s", s, err)
+		}
+		if result != sExpected {
+			t.Fatalf("unexpected string scanned from %s; got %s; want %s", s, result, sExpected)
+		}
+		if !strings.HasPrefix(s, result) {
+			t.Fatalf("invalid prefix for scanned string %s: %s", s, result)
+		}
+	}
+	f(`""`, `""`)
+	f(`''`, `''`)
+	f(`""tail`, `""`)
+	f(`''tail`, `''`)
+	f(`"foo", bar`, `"foo"`)
+	f(`'foo', bar`, `'foo'`)
+	f(`"foo\.bar"`, `"foo\.bar"`)
+	f(`"foo\"bar\1"\"`, `"foo\"bar\1"`)
+	f(`"foo\\"bar\1"\"`, `"foo\\"`)
+	f(`'foo\\'bar\1"\"`, `'foo\\'`)
+}
+
+func TestScanStringFailure(t *testing.T) {
+	f := func(s string) {
+		t.Helper()
+		result, err := scanString(s)
+		if err == nil {
+			t.Fatalf("expecting non-nil error for scanString(%s)", s)
+		}
+		if result != "" {
+			t.Fatalf("expecting empty result for scanString(%s); got %s", s, result)
+		}
+	}
+	f(``)
+	f(`"foo`)
+	f(`'bar`)
+}
+
+func TestAppendEscapedIdent(t *testing.T) {
+	f := func(s, resultExpected string) {
+		t.Helper()
+		result := appendEscapedIdent(nil, s)
+		if string(result) != resultExpected {
+			t.Fatalf("unexpected result; got\n%s\nwant\n%s", result, resultExpected)
+		}
+	}
+	f("", "")
+	f("a", "a")
+	f("fo_o.$b-ar.b[a]*z{aa,bb}", "fo_o.$b-ar.b[a]*z{aa,bb}")
+	f("a(b =C)", `a\(b\ \=C\)`)
+}
+
+func TestLexerSuccess(t *testing.T) {
+	f := func(s string, tokensExpected []string) {
+		t.Helper()
+		var lex lexer
+		var tokens []string
+		lex.Init(s)
+		for {
+			if err := lex.Next(); err != nil {
+				t.Fatalf("unexpected error: %s", err)
+			}
+			if isEOF(lex.Token) {
+				break
+			}
+			tokens = append(tokens, lex.Token)
+		}
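+		// Compare the full token stream against the expected tokens.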
+ if !reflect.DeepEqual(tokens, tokensExpected) { + t.Fatalf("unexpected tokens; got\n%q\nwant\n%q", tokens, tokensExpected) + } + } + f("", nil) + f("a", []string{"a"}) + f("*", []string{"*"}) + f("*.a", []string{"*.a"}) + f("[a-z]zx", []string{"[a-z]zx"}) + f("fo_o.$ba-r", []string{"fo_o.$ba-r"}) + f("{foo,bar}", []string{"{foo,bar}"}) + f("[a-z]", []string{"[a-z]"}) + f("fo*.bar10[s-z]as{aaa,bb}.ss{aa*ss,DS[c-D],ss}ss", []string{"fo*.bar10[s-z]as{aaa,bb}.ss{aa*ss,DS[c-D],ss}ss"}) + f("FOO.bar:avg", []string{"FOO.bar:avg"}) + f("FOO.bar\\:avg", []string{"FOO.bar\\:avg"}) + f(`foo.Bar|aaa(b,cd,e=aa)`, []string{"foo.Bar", "|", "aaa", "(", "b", ",", "cd", ",", "e", "=", "aa", ")"}) + f(`foo.Bar\|aaa\(b\,cd\,e\=aa\)`, []string{`foo.Bar\|aaa\(b\,cd\,e\=aa\)`}) + f(`123`, []string{`123`}) + f(`12.34`, []string{`12.34`}) + f(`12.34e4`, []string{`12.34e4`}) + f(`12.34e-4`, []string{`12.34e-4`}) + f(`12E+45`, []string{`12E+45`}) + f(`+12.34`, []string{`+`, `12.34`}) + f("0xABcd", []string{`0xABcd`}) + f("f(0o765,0b1101,0734,12.34)", []string{"f", "(", "0o765", ",", "0b1101", ",", "0734", ",", "12.34", ")"}) + f(`f ( foo, -.54e6,bar)`, []string{"f", "(", "foo", ",", "-", ".54e6", ",", "bar", ")"}) + f(`"foo(b'ar:baz)"`, []string{`"foo(b'ar:baz)"`}) + f(`'a"bc'`, []string{`'a"bc'`}) + f(`"f\"oo\\b"`, []string{`"f\"oo\\b"`}) + f(`a("b,c", 'de')`, []string{`a`, `(`, `"b,c"`, `,`, `'de'`, `)`}) +} + +func TestLexerError(t *testing.T) { + f := func(s string) { + t.Helper() + var lex lexer + lex.Init(s) + for { + if err := lex.Next(); err != nil { + // Make sure lex.Next() consistently returns the error. + if err1 := lex.Next(); err1 != err { + t.Fatalf("unexpected error returned; got %v; want %v", err1, err) + } + return + } + if isEOF(lex.Token) { + t.Fatalf("expecting non-nil error when parsing %q", s) + } + } + } + + // Invalid identifier + f("foo\\") + f(`foo[bar`) + f(`foo{bar`) + f("~") + f(",~") + + // Invalid string + f(`"`) + f(`"foo`) + f(`'aa`) + + // Invalid number + f(`0x`) + f(`0o`) + f(`-0b`) + f(`13.`) + f(`1e`) + f(`1E+`) + f(`1.3e`) +} diff --git a/app/vmselect/graphiteql/parser.go b/app/vmselect/graphiteql/parser.go new file mode 100644 index 000000000..f24a2e8bf --- /dev/null +++ b/app/vmselect/graphiteql/parser.go @@ -0,0 +1,407 @@ +package graphiteql + +import ( + "fmt" + "strconv" + "strings" +) + +type parser struct { + lex lexer +} + +// Expr is Graphite expression for render API. +type Expr interface { + // AppendString appends Expr contents to dst and returns the result. + AppendString(dst []byte) []byte +} + +// Parse parses Graphite render API target expression. 
+// +// See https://graphite.readthedocs.io/en/stable/render_api.html +func Parse(s string) (Expr, error) { + var p parser + p.lex.Init(s) + if err := p.lex.Next(); err != nil { + return nil, fmt.Errorf("cannot parse target expression: %w; context: %q", err, p.lex.Context()) + } + expr, err := p.parseExpr() + if err != nil { + return nil, fmt.Errorf("cannot parse target expression: %w; context: %q", err, p.lex.Context()) + } + if !isEOF(p.lex.Token) { + return nil, fmt.Errorf("unexpected tail left after parsing %q; context: %q", expr.AppendString(nil), p.lex.Context()) + } + return expr, nil +} + +func (p *parser) parseExpr() (Expr, error) { + var expr Expr + var err error + token := p.lex.Token + switch { + case isPositiveNumberPrefix(token) || token == "+" || token == "-": + expr, err = p.parseNumber() + if err != nil { + return nil, err + } + case isStringPrefix(token): + expr, err = p.parseString() + if err != nil { + return nil, err + } + case isIdentPrefix(token): + expr, err = p.parseMetricExprOrFuncCall() + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unexpected token when parsing expression: %q", token) + } + + for { + switch p.lex.Token { + case "|": + // Chained function call. For example, `metric|func` + firstArg := &ArgExpr{ + Expr: expr, + } + expr, err = p.parseChainedFunc(firstArg) + if err != nil { + return nil, err + } + continue + default: + return expr, nil + } + } +} + +func (p *parser) parseNumber() (*NumberExpr, error) { + token := p.lex.Token + isMinus := false + if token == "-" || token == "+" { + if err := p.lex.Next(); err != nil { + return nil, fmt.Errorf("cannot find number after %q token: %w", token, err) + } + isMinus = token == "-" + token = p.lex.Token + } + var n float64 + if isSpecialIntegerPrefix(token) { + d, err := strconv.ParseInt(token, 0, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse integer %q: %w", token, err) + } + n = float64(d) + } else { + f, err := strconv.ParseFloat(token, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse floating-point number %q: %w", token, err) + } + n = f + } + if isMinus { + n = -n + } + if err := p.lex.Next(); err != nil { + return nil, fmt.Errorf("cannot find next token after %q: %w", token, err) + } + ne := &NumberExpr{ + N: n, + } + return ne, nil +} + +// NoneExpr contains None value +type NoneExpr struct{} + +// AppendString appends string representation of nne to dst and returns the result. +func (nne *NoneExpr) AppendString(dst []byte) []byte { + return append(dst, "None"...) +} + +// BoolExpr contains bool value (True or False). +type BoolExpr struct { + // B is bool value + B bool +} + +// AppendString appends string representation of be to dst and returns the result. +func (be *BoolExpr) AppendString(dst []byte) []byte { + if be.B { + return append(dst, "True"...) + } + return append(dst, "False"...) +} + +// NumberExpr contains float64 constant. +type NumberExpr struct { + // N is float64 constant + N float64 +} + +// AppendString appends string representation of ne to dst and returns the result. 
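+// The value is formatted via strconv.AppendFloat using the shortest 'g' representation.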
+func (ne *NumberExpr) AppendString(dst []byte) []byte {
+	return strconv.AppendFloat(dst, ne.N, 'g', -1, 64)
+}
+
+func (p *parser) parseString() (*StringExpr, error) {
+	token := p.lex.Token
+	if len(token) < 2 || token[0] != token[len(token)-1] {
+		return nil, fmt.Errorf(`string literal contains unexpected trailing char; got %q`, token)
+	}
+	quote := string(append([]byte{}, token[0]))
+	s := token[1 : len(token)-1]
+	s = strings.ReplaceAll(s, `\`+quote, quote)
+	s = strings.ReplaceAll(s, `\\`, `\`)
+	if err := p.lex.Next(); err != nil {
+		return nil, fmt.Errorf("cannot find next token after %s: %w", token, err)
+	}
+	se := &StringExpr{
+		S: s,
+	}
+	return se, nil
+}
+
+// StringExpr represents string constant.
+type StringExpr struct {
+	// S contains unquoted string contents.
+	S string
+}
+
+// AppendString appends se to dst and returns the result.
+func (se *StringExpr) AppendString(dst []byte) []byte {
+	dst = append(dst, '\'')
+	s := strings.ReplaceAll(se.S, `\`, `\\`)
+	s = strings.ReplaceAll(s, `'`, `\'`)
+	dst = append(dst, s...)
+	dst = append(dst, '\'')
+	return dst
+}
+
+// QuoteString quotes s, so it could be used in Graphite queries.
+func QuoteString(s string) string {
+	se := &StringExpr{
+		S: s,
+	}
+	return string(se.AppendString(nil))
+}
+
+func (p *parser) parseMetricExprOrFuncCall() (Expr, error) {
+	token := p.lex.Token
+	ident := unescapeIdent(token)
+	if err := p.lex.Next(); err != nil {
+		return nil, fmt.Errorf("cannot find next token after %q: %w", token, err)
+	}
+	token = p.lex.Token
+	switch token {
+	case "(":
+		// Function call. For example, `func(foo,bar)`
+		funcName := ident
+		args, err := p.parseArgs()
+		if err != nil {
+			return nil, fmt.Errorf("cannot parse args for function %q: %w", funcName, err)
+		}
+		fe := &FuncExpr{
+			FuncName:   funcName,
+			Args:       args,
+			printState: printStateNormal,
+		}
+		return fe, nil
+	default:
+		// Metric expression or bool expression or None.
+		if isBool(ident) {
+			be := &BoolExpr{
+				B: strings.ToLower(ident) == "true",
+			}
+			return be, nil
+		}
+		if strings.ToLower(ident) == "none" {
+			nne := &NoneExpr{}
+			return nne, nil
+		}
+		me := &MetricExpr{
+			Query: ident,
+		}
+		return me, nil
+	}
+}
+
+func (p *parser) parseChainedFunc(firstArg *ArgExpr) (*FuncExpr, error) {
+	for {
+		if err := p.lex.Next(); err != nil {
+			return nil, fmt.Errorf("cannot find function name after %q|: %w", firstArg.AppendString(nil), err)
+		}
+		if !isIdentPrefix(p.lex.Token) {
+			return nil, fmt.Errorf("expecting function name after %q|, got %q", firstArg.AppendString(nil), p.lex.Token)
+		}
+		funcName := unescapeIdent(p.lex.Token)
+		if err := p.lex.Next(); err != nil {
+			return nil, fmt.Errorf("cannot find next token after %q|%q: %w", firstArg.AppendString(nil), funcName, err)
+		}
+		fe := &FuncExpr{
+			FuncName:   funcName,
+			printState: printStateChained,
+		}
+		if p.lex.Token != "(" {
+			fe.Args = []*ArgExpr{firstArg}
+		} else {
+			args, err := p.parseArgs()
+			if err != nil {
+				return nil, fmt.Errorf("cannot parse args for %q|%q: %w", firstArg.AppendString(nil), funcName, err)
+			}
+			fe.Args = append([]*ArgExpr{firstArg}, args...)
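+			// Either way, the piped expression remains the first argument of the chained call.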
+		}
+		if p.lex.Token != "|" {
+			return fe, nil
+		}
+		firstArg = &ArgExpr{
+			Expr: fe,
+		}
+	}
+}
+
+func (p *parser) parseArgs() ([]*ArgExpr, error) {
+	var args []*ArgExpr
+	for {
+		if err := p.lex.Next(); err != nil {
+			return nil, fmt.Errorf("cannot find arg #%d: %w", len(args), err)
+		}
+		if p.lex.Token == ")" {
+			if err := p.lex.Next(); err != nil {
+				return nil, fmt.Errorf("cannot find next token after function args: %w", err)
+			}
+			return args, nil
+		}
+		expr, err := p.parseExpr()
+		if err != nil {
+			return nil, fmt.Errorf("cannot parse arg #%d: %w", len(args), err)
+		}
+		if p.lex.Token == "=" {
+			// Named expression
+			me, ok := expr.(*MetricExpr)
+			if !ok {
+				return nil, fmt.Errorf("expecting a name for named expression; got %q", expr.AppendString(nil))
+			}
+			argName := me.Query
+			if err := p.lex.Next(); err != nil {
+				return nil, fmt.Errorf("cannot find named value for %q: %w", argName, err)
+			}
+			argValue, err := p.parseExpr()
+			if err != nil {
+				return nil, fmt.Errorf("cannot parse named value for %q: %w", argName, err)
+			}
+			args = append(args, &ArgExpr{
+				Name: argName,
+				Expr: argValue,
+			})
+		} else {
+			args = append(args, &ArgExpr{
+				Expr: expr,
+			})
+		}
+		switch p.lex.Token {
+		case ",":
+			// Continue parsing args
+		case ")":
+			// End of args
+			if err := p.lex.Next(); err != nil {
+				return nil, fmt.Errorf("cannot find next token after func args: %w", err)
+			}
+			return args, nil
+		default:
+			return nil, fmt.Errorf("unexpected token detected in func args: %q", p.lex.Token)
+		}
+	}
+}
+
+// ArgExpr represents function arg (which may be named).
+type ArgExpr struct {
+	// Name is named arg name. It is empty for positional arg.
+	Name string
+
+	// Expr is the arg expression.
+	Expr Expr
+}
+
+// AppendString appends string representation of ae to dst and returns the result.
+func (ae *ArgExpr) AppendString(dst []byte) []byte {
+	if ae.Name != "" {
+		dst = appendEscapedIdent(dst, ae.Name)
+		dst = append(dst, '=')
+	}
+	dst = ae.Expr.AppendString(dst)
+	return dst
+}
+
+// FuncExpr represents function call.
+type FuncExpr struct {
+	// FuncName is the function name
+	FuncName string
+
+	// Args is the list of function args.
+	Args []*ArgExpr
+
+	printState funcPrintState
+}
+
+type funcPrintState int
+
+const (
+	// Normal func call: `func(arg1, ..., argN)`
+	printStateNormal = funcPrintState(0)
+
+	// Chained func call: `arg1|func(arg2, ..., argN)`
+	printStateChained = funcPrintState(1)
+)
+
+// AppendString appends string representation of fe to dst and returns the result.
+func (fe *FuncExpr) AppendString(dst []byte) []byte {
+	switch fe.printState {
+	case printStateNormal:
+		dst = appendEscapedIdent(dst, fe.FuncName)
+		dst = appendArgsString(dst, fe.Args)
+	case printStateChained:
+		if len(fe.Args) == 0 {
+			panic("BUG: chained func call must have at least a single arg")
+		}
+		firstArg := fe.Args[0]
+		tailArgs := fe.Args[1:]
+		if firstArg.Name != "" {
+			panic("BUG: the first chained arg must have no name")
+		}
+		dst = firstArg.AppendString(dst)
+		dst = append(dst, '|')
+		dst = appendEscapedIdent(dst, fe.FuncName)
+		if len(tailArgs) > 0 {
+			dst = appendArgsString(dst, tailArgs)
+		}
+	default:
+		panic(fmt.Sprintf("BUG: unexpected printState=%d", fe.printState))
+	}
+	return dst
+}
+
+// MetricExpr represents metric expression.
+type MetricExpr struct {
+	// Query is the query for fetching metrics.
+	Query string
+}
+
+// AppendString appends string representation of me to dst and returns the result.
+func (me *MetricExpr) AppendString(dst []byte) []byte { + return appendEscapedIdent(dst, me.Query) +} + +func appendArgsString(dst []byte, args []*ArgExpr) []byte { + dst = append(dst, '(') + for i, arg := range args { + dst = arg.AppendString(dst) + if i+1 < len(args) { + dst = append(dst, ',') + } + } + dst = append(dst, ')') + return dst +} diff --git a/app/vmselect/graphiteql/parser_test.go b/app/vmselect/graphiteql/parser_test.go new file mode 100644 index 000000000..287c4eac2 --- /dev/null +++ b/app/vmselect/graphiteql/parser_test.go @@ -0,0 +1,121 @@ +package graphiteql + +import ( + "testing" +) + +func TestQuoteString(t *testing.T) { + f := func(s, qExpected string) { + t.Helper() + q := QuoteString(s) + if q != qExpected { + t.Fatalf("unexpected result from QuoteString(%q); got %s; want %s", s, q, qExpected) + } + } + f(``, `''`) + f(`foo`, `'foo'`) + f(`f'o\ba"r`, `'f\'o\\ba"r'`) +} + +func TestParseSuccess(t *testing.T) { + another := func(s, resultExpected string) { + t.Helper() + expr, err := Parse(s) + if err != nil { + t.Fatalf("unexpected error when parsing %s: %s", s, err) + } + result := expr.AppendString(nil) + if string(result) != resultExpected { + t.Fatalf("unexpected result when marshaling %s;\ngot\n%s\nwant\n%s", s, result, resultExpected) + } + } + same := func(s string) { + t.Helper() + another(s, s) + } + // Metric expressions + same("a") + same("foo.bar.baz") + same("foo.bar.baz:aa:bb") + same("fOO.*.b[a-z]R{aa*,bb}s_s.$aaa") + same("*") + same("*.foo") + same("{x,y}.z") + same("[x-zaBc]DeF") + another(`\f\ oo`, `f\ oo`) + another(`f\x1B\x3a`, `f\x1b:`) + + // booleans + same("True") + same("False") + another("true", "True") + another("faLSe", "False") + + // Numbers + same("123") + same("-123") + another("+123", "123") + same("12.3") + same("-1.23") + another("+1.23", "1.23") + another("123e5", "1.23e+07") + another("-123e5", "-1.23e+07") + another("+123e5", "1.23e+07") + another("1.23E5", "123000") + another("-1.23e5", "-123000") + another("+1.23e5", "123000") + another("0xab", "171") + another("0b1011101", "93") + another("0O12345", "5349") + + // strings + another(`"foo'"`, `'foo\''`) + same(`'fo\'o"b\\ar'`) + another(`"f\\oo\.bar\1"`, `'f\\oo\\.bar\\1'`) + + // function calls + same("foo()") + another("foo(bar,)", "foo(bar)") + same("foo(bar,123,'baz')") + another("foo(foo(bar), BAZ = xx ( 123, x))", `foo(foo(bar),BAZ=xx(123,x))`) + + // chained functions + another("foo | bar", "foo|bar") + same("foo|bar|baz") + same("foo(sss)|bar(aa)|xxx.ss") + another(`foo|bar(1,"sdf")`, `foo|bar(1,'sdf')`) + + // mix + same(`f(a,xx=b|c|aa(124,'456'),aa=bb)`) +} + +func TestParseFailure(t *testing.T) { + f := func(s string) { + t.Helper() + expr, err := Parse(s) + if err == nil { + t.Fatalf("expecting error when parsing %s", s) + } + if expr != nil { + t.Fatalf("expecting nil expr when parsing %s; got %s", s, expr.AppendString(nil)) + } + } + f("") + f("'asdf") + f("foo bar") + f("f(a") + f("f(1.2.3") + f("foo|bar(") + f("+foo") + f("-bar") + f("123 '") + f("f '") + f("f|") + f("f|'") + f("f|123") + f("f('") + f("f(f()=123)") + f("f(a=')") + f("f(a=foo(") + f("f()'") +} diff --git a/app/vmselect/prometheus/prometheus.go b/app/vmselect/prometheus/prometheus.go index bd62b3cc6..248833aee 100644 --- a/app/vmselect/prometheus/prometheus.go +++ b/app/vmselect/prometheus/prometheus.go @@ -54,10 +54,6 @@ func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request if err := r.ParseForm(); err != nil { return fmt.Errorf("cannot parse request form values: 
%w", err) } - matches := r.Form["match[]"] - if len(matches) == 0 { - return fmt.Errorf("missing `match[]` arg") - } lookbackDelta, err := getMaxLookback(r) if err != nil { return err @@ -77,7 +73,7 @@ func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request if start >= end { start = end - defaultStep } - tagFilterss, err := getTagFilterssFromMatches(matches) + tagFilterss, err := getTagFilterssFromRequest(r) if err != nil { return err } @@ -123,15 +119,6 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques return fmt.Errorf("missing `format` arg; see https://victoriametrics.github.io/#how-to-export-csv-data") } fieldNames := strings.Split(format, ",") - matches := r.Form["match[]"] - if len(matches) == 0 { - // Maintain backwards compatibility - match := r.FormValue("match") - if len(match) == 0 { - return fmt.Errorf("missing `match[]` arg") - } - matches = []string{match} - } start, err := searchutils.GetTime(r, "start", 0) if err != nil { return err @@ -141,7 +128,7 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques return err } deadline := searchutils.GetDeadlineForExport(r, startTime) - tagFilterss, err := getTagFilterssFromMatches(matches) + tagFilterss, err := getTagFilterssFromRequest(r) if err != nil { return err } @@ -200,15 +187,6 @@ func ExportNativeHandler(startTime time.Time, w http.ResponseWriter, r *http.Req if err := r.ParseForm(); err != nil { return fmt.Errorf("cannot parse request form values: %w", err) } - matches := r.Form["match[]"] - if len(matches) == 0 { - // Maintain backwards compatibility - match := r.FormValue("match") - if len(match) == 0 { - return fmt.Errorf("missing `match[]` arg") - } - matches = []string{match} - } start, err := searchutils.GetTime(r, "start", 0) if err != nil { return err @@ -218,7 +196,7 @@ func ExportNativeHandler(startTime time.Time, w http.ResponseWriter, r *http.Req return err } deadline := searchutils.GetDeadlineForExport(r, startTime) - tagFilterss, err := getTagFilterssFromMatches(matches) + tagFilterss, err := getTagFilterssFromRequest(r) if err != nil { return err } @@ -282,14 +260,9 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) if err := r.ParseForm(); err != nil { return fmt.Errorf("cannot parse request form values: %w", err) } - matches := r.Form["match[]"] - if len(matches) == 0 { - // Maintain backwards compatibility - match := r.FormValue("match") - if len(match) == 0 { - return fmt.Errorf("missing `match[]` arg") - } - matches = []string{match} + matches, err := getMatchesFromRequest(r) + if err != nil { + return err } start, err := searchutils.GetTime(r, "start", 0) if err != nil { @@ -306,7 +279,11 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) if start >= end { end = start + defaultStep } - if err := exportHandler(w, matches, start, end, format, maxRowsPerLine, reduceMemUsage, deadline); err != nil { + etf, err := getEnforcedTagFiltersFromRequest(r) + if err != nil { + return err + } + if err := exportHandler(w, matches, etf, start, end, format, maxRowsPerLine, reduceMemUsage, deadline); err != nil { return fmt.Errorf("error when exporting data for queries=%q on the time range (start=%d, end=%d): %w", matches, start, end, err) } exportDuration.UpdateDuration(startTime) @@ -315,7 +292,7 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) var exportDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/export"}`) 
-func exportHandler(w http.ResponseWriter, matches []string, start, end int64, format string, maxRowsPerLine int, reduceMemUsage bool, deadline searchutils.Deadline) error { +func exportHandler(w http.ResponseWriter, matches []string, etf []storage.TagFilter, start, end int64, format string, maxRowsPerLine int, reduceMemUsage bool, deadline searchutils.Deadline) error { writeResponseFunc := WriteExportStdResponse writeLineFunc := func(xb *exportBlock, resultsCh chan<- *quicktemplate.ByteBuffer) { bb := quicktemplate.AcquireByteBuffer() @@ -372,6 +349,8 @@ func exportHandler(w http.ResponseWriter, matches []string, start, end int64, fo if err != nil { return err } + tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf) + sq := storage.NewSearchQuery(start, end, tagFilterss) w.Header().Set("Content-Type", contentType) bw := bufferedwriter.Get(w) @@ -465,18 +444,14 @@ func DeleteHandler(startTime time.Time, r *http.Request) error { if r.FormValue("start") != "" || r.FormValue("end") != "" { return fmt.Errorf("start and end aren't supported. Remove these args from the query in order to delete all the matching metrics") } - matches := r.Form["match[]"] - if len(matches) == 0 { - return fmt.Errorf("missing `match[]` arg") - } - tagFilterss, err := getTagFilterssFromMatches(matches) + tagFilterss, err := getTagFilterssFromRequest(r) if err != nil { return err } sq := storage.NewSearchQuery(0, 0, tagFilterss) deletedCount, err := netstorage.DeleteSeries(sq) if err != nil { - return fmt.Errorf("cannot delete time series matching %q: %w", matches, err) + return fmt.Errorf("cannot delete time series: %w", err) } if deletedCount > 0 { promql.ResetRollupResultCache() @@ -495,8 +470,12 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr if err := r.ParseForm(); err != nil { return fmt.Errorf("cannot parse form values: %w", err) } + etf, err := getEnforcedTagFiltersFromRequest(r) + if err != nil { + return err + } var labelValues []string - if len(r.Form["match[]"]) == 0 { + if len(r.Form["match[]"]) == 0 && len(etf) == 0 { if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 { var err error labelValues, err = netstorage.GetLabelValues(labelName, deadline) @@ -540,7 +519,7 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr if err != nil { return err } - labelValues, err = labelValuesWithMatches(labelName, matches, start, end, deadline) + labelValues, err = labelValuesWithMatches(labelName, matches, etf, start, end, deadline) if err != nil { return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %w", labelName, matches, start, end, err) } @@ -557,10 +536,7 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr return nil } -func labelValuesWithMatches(labelName string, matches []string, start, end int64, deadline searchutils.Deadline) ([]string, error) { - if len(matches) == 0 { - logger.Panicf("BUG: matches must be non-empty") - } +func labelValuesWithMatches(labelName string, matches []string, etf []storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) { tagFilterss, err := getTagFilterssFromMatches(matches) if err != nil { return nil, err @@ -581,6 +557,10 @@ func labelValuesWithMatches(labelName string, matches []string, start, end int64 if start >= end { end = start + defaultStep } + tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf) + if len(tagFilterss) == 0 { + logger.Panicf("BUG: tagFilterss must be non-empty") + } sq 
:= storage.NewSearchQuery(start, end, tagFilterss) m := make(map[string]struct{}) if end-start > 24*3600*1000 { @@ -705,8 +685,12 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) if err := r.ParseForm(); err != nil { return fmt.Errorf("cannot parse form values: %w", err) } + etf, err := getEnforcedTagFiltersFromRequest(r) + if err != nil { + return err + } var labels []string - if len(r.Form["match[]"]) == 0 { + if len(r.Form["match[]"]) == 0 && len(etf) == 0 { if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 { var err error labels, err = netstorage.GetLabels(deadline) @@ -748,7 +732,7 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) if err != nil { return err } - labels, err = labelsWithMatches(matches, start, end, deadline) + labels, err = labelsWithMatches(matches, etf, start, end, deadline) if err != nil { return fmt.Errorf("cannot obtain labels for match[]=%q, start=%d, end=%d: %w", matches, start, end, err) } @@ -765,10 +749,7 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) return nil } -func labelsWithMatches(matches []string, start, end int64, deadline searchutils.Deadline) ([]string, error) { - if len(matches) == 0 { - logger.Panicf("BUG: matches must be non-empty") - } +func labelsWithMatches(matches []string, etf []storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) { tagFilterss, err := getTagFilterssFromMatches(matches) if err != nil { return nil, err @@ -776,6 +757,10 @@ func labelsWithMatches(matches []string, start, end int64, deadline searchutils. if start >= end { end = start + defaultStep } + tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf) + if len(tagFilterss) == 0 { + logger.Panicf("BUG: tagFilterss must be non-empty") + } sq := storage.NewSearchQuery(start, end, tagFilterss) m := make(map[string]struct{}) if end-start > 24*3600*1000 { @@ -849,10 +834,6 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) if err := r.ParseForm(); err != nil { return fmt.Errorf("cannot parse form values: %w", err) } - matches := r.Form["match[]"] - if len(matches) == 0 { - return fmt.Errorf("missing `match[]` arg") - } end, err := searchutils.GetTime(r, "end", ct) if err != nil { return err @@ -868,7 +849,7 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) } deadline := searchutils.GetDeadlineForQuery(r, startTime) - tagFilterss, err := getTagFilterssFromMatches(matches) + tagFilterss, err := getTagFilterssFromRequest(r) if err != nil { return err } @@ -969,6 +950,10 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e if len(query) > maxQueryLen.N { return fmt.Errorf("too long query; got %d bytes; mustn't exceed `-search.maxQueryLen=%d` bytes", len(query), maxQueryLen.N) } + etf, err := getEnforcedTagFiltersFromRequest(r) + if err != nil { + return err + } if childQuery, windowStr, offsetStr := promql.IsMetricSelectorWithRollup(query); childQuery != "" { window, err := parsePositiveDuration(windowStr, step) if err != nil { @@ -981,7 +966,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e start -= offset end := start start = end - window - if err := exportHandler(w, []string{childQuery}, start, end, "promapi", 0, false, deadline); err != nil { + if err := exportHandler(w, []string{childQuery}, etf, start, end, "promapi", 0, false, deadline); err != nil { return fmt.Errorf("error when exporting data for 
query=%q on the time range (start=%d, end=%d): %w", childQuery, start, end, err) } queryDuration.UpdateDuration(startTime) @@ -1006,7 +991,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e start -= offset end := start start = end - window - if err := queryRangeHandler(startTime, w, childQuery, start, end, step, r, ct); err != nil { + if err := queryRangeHandler(startTime, w, childQuery, start, end, step, r, ct, etf); err != nil { return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", childQuery, start, end, step, err) } queryDuration.UpdateDuration(startTime) @@ -1024,12 +1009,13 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e queryOffset = 0 } ec := promql.EvalConfig{ - Start: start, - End: start, - Step: step, - QuotedRemoteAddr: httpserver.GetQuotedRemoteAddr(r), - Deadline: deadline, - LookbackDelta: lookbackDelta, + Start: start, + End: start, + Step: step, + QuotedRemoteAddr: httpserver.GetQuotedRemoteAddr(r), + Deadline: deadline, + LookbackDelta: lookbackDelta, + EnforcedTagFilters: etf, } result, err := promql.Exec(&ec, query, true) if err != nil { @@ -1092,14 +1078,18 @@ func QueryRangeHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque if err != nil { return err } - if err := queryRangeHandler(startTime, w, query, start, end, step, r, ct); err != nil { + etf, err := getEnforcedTagFiltersFromRequest(r) + if err != nil { + return err + } + if err := queryRangeHandler(startTime, w, query, start, end, step, r, ct, etf); err != nil { return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", query, start, end, step, err) } queryRangeDuration.UpdateDuration(startTime) return nil } -func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string, start, end, step int64, r *http.Request, ct int64) error { +func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string, start, end, step int64, r *http.Request, ct int64, etf []storage.TagFilter) error { deadline := searchutils.GetDeadlineForQuery(r, startTime) mayCache := !searchutils.GetBool(r, "nocache") lookbackDelta, err := getMaxLookback(r) @@ -1122,13 +1112,14 @@ func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string, } ec := promql.EvalConfig{ - Start: start, - End: end, - Step: step, - QuotedRemoteAddr: httpserver.GetQuotedRemoteAddr(r), - Deadline: deadline, - MayCache: mayCache, - LookbackDelta: lookbackDelta, + Start: start, + End: end, + Step: step, + QuotedRemoteAddr: httpserver.GetQuotedRemoteAddr(r), + Deadline: deadline, + MayCache: mayCache, + LookbackDelta: lookbackDelta, + EnforcedTagFilters: etf, } result, err := promql.Exec(&ec, query, false) if err != nil { @@ -1236,6 +1227,38 @@ func getMaxLookback(r *http.Request) (int64, error) { return searchutils.GetDuration(r, "max_lookback", d) } +func getEnforcedTagFiltersFromRequest(r *http.Request) ([]storage.TagFilter, error) { + // fast path. 
+ extraLabels := r.Form["extra_label"]
+ if len(extraLabels) == 0 {
+ return nil, nil
+ }
+ tagFilters := make([]storage.TagFilter, 0, len(extraLabels))
+ for _, match := range extraLabels {
+ tmp := strings.SplitN(match, "=", 2)
+ if len(tmp) != 2 {
+ return nil, fmt.Errorf("`extra_label` query arg must have the format `name=value`; got %q", match)
+ }
+ tagFilters = append(tagFilters, storage.TagFilter{
+ Key: []byte(tmp[0]),
+ Value: []byte(tmp[1]),
+ })
+ }
+ return tagFilters, nil
+}
+
+func addEnforcedFiltersToTagFilterss(dstTfss [][]storage.TagFilter, enforcedFilters []storage.TagFilter) [][]storage.TagFilter {
+ if len(dstTfss) == 0 {
+ return [][]storage.TagFilter{
+ enforcedFilters,
+ }
+ }
+ for i := range dstTfss {
+ dstTfss[i] = append(dstTfss[i], enforcedFilters...)
+ }
+ return dstTfss
+}
+
 func getTagFilterssFromMatches(matches []string) ([][]storage.TagFilter, error) {
 tagFilterss := make([][]storage.TagFilter, 0, len(matches))
 for _, match := range matches {
@@ -1248,6 +1271,35 @@ func getTagFilterssFromMatches(matches []string) ([][]storage.TagFilter, error)
 return tagFilterss, nil
 }
+func getTagFilterssFromRequest(r *http.Request) ([][]storage.TagFilter, error) {
+ matches, err := getMatchesFromRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ tagFilterss, err := getTagFilterssFromMatches(matches)
+ if err != nil {
+ return nil, err
+ }
+ etf, err := getEnforcedTagFiltersFromRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
+ return tagFilterss, nil
+}
+
+func getMatchesFromRequest(r *http.Request) ([]string, error) {
+ matches := r.Form["match[]"]
+ if len(matches) > 0 {
+ return matches, nil
+ }
+ match := r.Form.Get("match")
+ if len(match) == 0 {
+ return nil, fmt.Errorf("missing `match[]` query arg")
+ }
+ return []string{match}, nil
+}
+
 func getLatencyOffsetMilliseconds() int64 {
 d := latencyOffset.Milliseconds()
 if d <= 1000 {
diff --git a/app/vmselect/prometheus/prometheus_test.go b/app/vmselect/prometheus/prometheus_test.go
index 70d2b06f2..d376a928b 100644
--- a/app/vmselect/prometheus/prometheus_test.go
+++ b/app/vmselect/prometheus/prometheus_test.go
@@ -2,10 +2,12 @@ package prometheus
 import (
 "math"
+ "net/http"
 "reflect"
 "testing"
 "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
+ "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 )
 func TestRemoveEmptyValuesAndTimeseries(t *testing.T) {
@@ -195,3 +197,75 @@ func TestAdjustLastPoints(t *testing.T) {
 },
 })
 }
+
+// helper for tests
+func tfFromKV(k, v string) storage.TagFilter {
+ return storage.TagFilter{
+ Key: []byte(k),
+ Value: []byte(v),
+ }
+}
+
+func Test_addEnforcedFiltersToTagFilterss(t *testing.T) {
+ f := func(t *testing.T, dstTfss [][]storage.TagFilter, enforcedFilters []storage.TagFilter, want [][]storage.TagFilter) {
+ t.Helper()
+ got := addEnforcedFiltersToTagFilterss(dstTfss, enforcedFilters)
+ if !reflect.DeepEqual(got, want) {
+ t.Fatalf("unexpected result for addEnforcedFiltersToTagFilterss, \ngot: %v,\n want: %v", got, want)
+ }
+ }
+ f(t, [][]storage.TagFilter{{tfFromKV("label", "value")}},
+ nil,
+ [][]storage.TagFilter{{tfFromKV("label", "value")}})
+
+ f(t, nil,
+ []storage.TagFilter{tfFromKV("ext-label", "ext-value")},
+ [][]storage.TagFilter{{tfFromKV("ext-label", "ext-value")}})
+
+ f(t, [][]storage.TagFilter{
+ {tfFromKV("l1", "v1")},
+ {tfFromKV("l2", "v2")},
+ },
+ []storage.TagFilter{tfFromKV("ext-l1", "v2")},
+ [][]storage.TagFilter{
+ {tfFromKV("l1", "v1"), tfFromKV("ext-l1", 
"v2")}, + {tfFromKV("l2", "v2"), tfFromKV("ext-l1", "v2")}, + }) +} + +func Test_getEnforcedTagFiltersFromRequest(t *testing.T) { + httpReqWithForm := func(tfs []string) *http.Request { + return &http.Request{ + Form: map[string][]string{ + "extra_label": tfs, + }, + } + } + f := func(t *testing.T, r *http.Request, want []storage.TagFilter, wantErr bool) { + t.Helper() + got, err := getEnforcedTagFiltersFromRequest(r) + if (err != nil) != wantErr { + t.Fatalf("unexpected error: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("unxpected result for getEnforcedTagFiltersFromRequest, \ngot: %v,\n want: %v", want, got) + } + } + + f(t, httpReqWithForm([]string{"label=value"}), + []storage.TagFilter{ + tfFromKV("label", "value"), + }, + false) + + f(t, httpReqWithForm([]string{"job=vmagent", "dc=gce"}), + []storage.TagFilter{tfFromKV("job", "vmagent"), tfFromKV("dc", "gce")}, + false, + ) + f(t, httpReqWithForm([]string{"bad_filter"}), + nil, + true, + ) + f(t, &http.Request{}, + nil, false) +} diff --git a/app/vmselect/promql/eval.go b/app/vmselect/promql/eval.go index e5735c72e..d3b197788 100644 --- a/app/vmselect/promql/eval.go +++ b/app/vmselect/promql/eval.go @@ -100,6 +100,9 @@ type EvalConfig struct { timestamps []int64 timestampsOnce sync.Once + + // EnforcedTagFilters used for apply additional label filters to query. + EnforcedTagFilters []storage.TagFilter } // newEvalConfig returns new EvalConfig copy from src. @@ -111,6 +114,7 @@ func newEvalConfig(src *EvalConfig) *EvalConfig { ec.Deadline = src.Deadline ec.MayCache = src.MayCache ec.LookbackDelta = src.LookbackDelta + ec.EnforcedTagFilters = src.EnforcedTagFilters // do not copy src.timestamps - they must be generated again. return &ec @@ -647,6 +651,8 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, name string, rf rollupFunc, // Fetch the remaining part of the result. tfs := toTagFilters(me.LabelFilters) + // append external filters. + tfs = append(tfs, ec.EnforcedTagFilters...) minTimestamp := start - maxSilenceInterval if window > ec.Step { minTimestamp -= window diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index fc9f1f7bd..5a24263e7 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -2,13 +2,22 @@ # tip +* FEATURE: added [vmctl tool](https://victoriametrics.github.io/vmctl.html) to VictoriaMetrics release process. Now it is packaged in `vmutils-*.tar.gz` archive on [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases). Source code for `vmctl` tool has been moved from [github.com/VictoriaMetrics/vmctl](https://github.com/VictoriaMetrics/vmctl) to [github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmctl). * FEATURE: added `-loggerTimezone` command-line flag for adjusting time zone for timestamps in log messages. By default UTC is used. -* FEATURE: added `-search.maxStepForPointsAdjustment` command-line flag, which can be used for disabling adjustment for points returned `/api/v1/query_range` handler if such points have timestamps closer than `-search.latencyOffset` to the current time. Such points may contain incomplete data, so they are substituted by the previous values for `step` query args smaller than one minute by default. -* FEATURE: vmalert: added `-datasource.queryStep` command-line flag for passing `step` query arg to `/api/v1/query` endpoint. 
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1025
+* FEATURE: added `-search.maxStepForPointsAdjustment` command-line flag, which can be used for disabling adjustment for points returned by `/api/v1/query_range` handler if such points have timestamps closer than `-search.latencyOffset` to the current time. Such points may contain incomplete data, so they are substituted by the previous values for `step` query args smaller than one minute by default.
+* FEATURE: vmselect: added ability to set additional label filters, which must be applied during queries. Such label filters can be set via optional `extra_label` query arg, which is accepted by [querying API](https://victoriametrics.github.io/#prometheus-querying-api-usage) handlers. For example, the request to `/api/v1/query_range?extra_label=tenant_id=123&query=<query>` adds `{tenant_id="123"}` label filter to the given `<query>`. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting
+in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1021 .
+* FEATURE: vmalert: added `-datasource.queryStep` command-line flag for passing optional `step` query arg to `/api/v1/query` endpoint. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1025
+* FEATURE: vmalert: added ability to query Graphite datasource when evaluating alerting and recording rules. See [these docs](https://victoriametrics.github.io/vmalert.html#graphite) for details.
+* FEATURE: vmagent: added `-remoteWrite.roundDigits` command-line option for rounding metric values to the given number of decimal digits after the point before sending the metric to the corresponding `-remoteWrite.url`. This option can be used for improving data compression on the remote storage, because values with a lower number of decimal digits can be compressed better than values with a higher number of decimal digits.
 * FEATURE: vmagent: added `-remoteWrite.rateLimit` command-line flag for limiting data transfer rate to `-remoteWrite.url`. This may be useful when big amounts of buffered data is sent after temporary unavailability of the remote storage. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1035
-* FEATURE: vmagent: export `vm_promscrape_scrapes_failed_per_url_total` and `vm_promscrape_scrapes_skipped_by_sample_limit_per_url_total` counters, which may help identifying improperly working scrape targets.
-
-* BUGFIX: vmagent: reduce the HTTP reconnection rate to scrape targets. Previously vmagent could errorneusly close HTTP keep-alive connections more often than needed.
+* FEATURE: vmagent: export the following additional metrics, which may be useful during troubleshooting:
+  - `vm_promscrape_scrapes_failed_per_url_total`
+  - `vm_promscrape_scrapes_skipped_by_sample_limit_per_url_total`
+  - `vm_promscrape_discovery_requests_total`
+  - `vm_promscrape_discovery_retries_total`
+  - `vm_promscrape_scrape_retries_total`
+* BUGFIX: vmagent: reduce HTTP reconnection rate for scrape targets. Previously vmagent could erroneously close HTTP keep-alive connections more frequently than needed.
 * BUGFIX: vmagent: retry scrape and service discovery requests when the remote server closes HTTP keep-alive connection. Previously `disable_keepalive: true` option could be used under `scrape_configs` section when working with such servers.
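The `extra_label` feature described in the changelog above is designed to be paired with an auth proxy that rewrites incoming query URLs before they reach vmselect. A minimal Go sketch of such a proxy (hypothetical; a real deployment would derive the tenant from actual request credentials rather than a hardcoded value, and the addresses are assumptions):

```go
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	// Address of the VictoriaMetrics instance behind the proxy (assumed).
	target, err := url.Parse("http://localhost:8428")
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(target)
	origDirector := proxy.Director
	proxy.Director = func(r *http.Request) {
		origDirector(r)
		// Enforce the tenant scope by setting the extra_label query arg,
		// so vmselect adds {tenant_id="123"} to every executed query.
		// A real proxy would look up the tenant per authenticated user.
		q := r.URL.Query()
		q.Set("extra_label", "tenant_id=123")
		r.URL.RawQuery = q.Encode()
	}
	log.Fatal(http.ListenAndServe(":8427", proxy))
}
```

Note that such a proxy must be the only reachable entry point to vmselect, since any client with direct access could simply omit the `extra_label` query arg.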
diff --git a/docs/Cluster-VictoriaMetrics.md b/docs/Cluster-VictoriaMetrics.md
index da8df3bf3..68d0ccd5e 100644
--- a/docs/Cluster-VictoriaMetrics.md
+++ b/docs/Cluster-VictoriaMetrics.md
@@ -269,7 +269,7 @@ the update process. See [cluster availability](#cluster-availability) section fo
 * The cluster remains available if at least a single `vmstorage` node exists:
 - `vminsert` re-routes incoming data from unavailable `vmstorage` nodes to healthy `vmstorage` nodes
- - `vmselect` continues serving partial responses if at least a single `vmstorage` node is available.
+ - `vmselect` continues serving partial responses if at least a single `vmstorage` node is available. If consistency over availability is preferred, then either pass `-search.denyPartialResponse` command-line flag to `vmselect` or pass `deny_partial_response=1` query arg in requests to `vmselect`.
 Data replication can be used for increasing storage durability. See [these docs](#replication-and-data-safety) for details.
diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md
index e60d13534..d2e3d8798 100644
--- a/docs/Single-server-VictoriaMetrics.md
+++ b/docs/Single-server-VictoriaMetrics.md
@@ -154,6 +154,7 @@ Alphabetically sorted links to case studies:
 * [Tuning](#tuning)
 * [Monitoring](#monitoring)
 * [Troubleshooting](#troubleshooting)
+* [Data migration](#data-migration)
 * [Backfilling](#backfilling)
 * [Data updates](#data-updates)
 * [Replication](#replication)
@@ -547,10 +548,15 @@ These handlers can be queried from Prometheus-compatible clients such as Grafana
 ### Prometheus querying API enhancements
-Additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) VictoriaMetrics accepts relative times in `time`, `start` and `end` query args.
+VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg, which can be used for enforcing additional label filters for queries. For example,
+`/api/v1/query_range?extra_label=user_id=123&query=<query>` would automatically add `{user_id="123"}` label filter to the given `<query>`. This functionality can be used
+for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting
+in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.
+
+VictoriaMetrics accepts relative times in `time`, `start` and `end` query args additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt).
 For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`.
-By default, VictoriaMetrics returns time series for the last 5 minutes from /api/v1/series, while the Prometheus API defaults to all time. Use `start` and `end` to select a different time range.
+By default, VictoriaMetrics returns time series for the last 5 minutes from `/api/v1/series`, while the Prometheus API defaults to all time. Use `start` and `end` to select a different time range.
 VictoriaMetrics accepts additional args for `/api/v1/labels` and `/api/v1/label/.../values` handlers.
 See [this feature request](https://github.com/prometheus/prometheus/issues/6178) for details:
@@ -1353,6 +1359,17 @@ See the example of alerting rules for VM components [here](https://github.com/Vi
 * VictoriaMetrics ignores `NaN` values during data ingestion.
+## Data migration
+
+Use [vmctl](https://victoriametrics.github.io/vmctl.html) for data migration. 
It supports the following data migration types: + +* From Prometheus to VictoriaMetrics +* From InfluxDB to VictoriaMetrics +* From VictoriaMetrics to VictoriaMetrics + +See [vmctl docs](https://victoriametrics.github.io/vmctl.html) for more details. + + ## Backfilling VictoriaMetrics accepts historical data in arbitrary order of time via [any supported ingestion method](#how-to-import-time-series-data). @@ -1420,7 +1437,6 @@ The collected profiles may be analyzed with [go tool pprof](https://github.com/g * [Helm charts for single-node and cluster versions of VictoriaMetrics](https://github.com/VictoriaMetrics/helm-charts). * [Kubernetes operator for VictoriaMetrics](https://github.com/VictoriaMetrics/operator). -* [vmctl tool for data migration to VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl). * [netdata](https://github.com/netdata/netdata) can push data into VictoriaMetrics via `Prometheus remote_write API`. See [these docs](https://github.com/netdata/netdata#integrations). * [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi) can use VictoriaMetrics as time series backend. diff --git a/docs/vmagent.md b/docs/vmagent.md index 0c5eb9e58..d2d48e5ec 100644 --- a/docs/vmagent.md +++ b/docs/vmagent.md @@ -7,7 +7,7 @@ or any other Prometheus-compatible storage system that supports the `remote_writ vmagent -### Motivation +## Motivation While VictoriaMetrics provides an efficient solution to store and observe metrics, our users needed something fast and RAM friendly to scrape metrics from Prometheus-compatible exporters to VictoriaMetrics. @@ -15,7 +15,7 @@ Also, we found that users’ infrastructure are snowflakes - no two are alike, a to `vmagent` (like the ability to push metrics instead of pulling them). We did our best and plan to do even more. -### Features +## Features * Can be used as drop-in replacement for Prometheus for scraping targets such as [node_exporter](https://github.com/prometheus/node_exporter). See [Quick Start](#quick-start) for details. @@ -36,7 +36,7 @@ to `vmagent` (like the ability to push metrics instead of pulling them). We did * Uses lower amounts of RAM, CPU, disk IO and network bandwidth compared to Prometheus. -### Quick Start +## Quick Start Just download `vmutils-*` archive from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), unpack it and pass the following flags to `vmagent` binary in order to start scraping Prometheus targets: @@ -63,7 +63,7 @@ Then send Influx data to `http://vmagent-host:8429`. See [these docs](https://vi Pass `-help` to `vmagent` in order to see the full list of supported command-line flags with their descriptions. -### Configuration update +## Configuration update `vmagent` should be restarted in order to update config options set via command-line args. @@ -79,10 +79,10 @@ Pass `-help` to `vmagent` in order to see the full list of supported command-lin There is also `-promscrape.configCheckInterval` command-line option, which can be used for automatic reloading configs from updated `-promscrape.config` file. -### Use cases +## Use cases -#### IoT and Edge monitoring +### IoT and Edge monitoring `vmagent` can run and collect metrics in IoT and industrial networks with unreliable or scheduled connections to the remote storage. It buffers the collected data in local files until the connection to remote storage becomes available and then sends the buffered @@ -93,14 +93,14 @@ The maximum buffer size can be limited with `-remoteWrite.maxDiskUsagePerURL`. 
See [the corresponding Makefile rules](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmagent/Makefile) for details.
-#### Drop-in replacement for Prometheus
+### Drop-in replacement for Prometheus
 If you use Prometheus only for scraping metrics from various targets and forwarding these metrics to remote storage,
 then `vmagent` can replace such Prometheus setup. Usually `vmagent` requires lower amounts of RAM, CPU and network bandwidth compared to Prometheus for such a setup.
 See [these docs](#how-to-collect-metrics-in-prometheus-format) for details.
-#### Replication and high availability
+### Replication and high availability
 `vmagent` replicates the collected metrics among multiple remote storage instances configured via `-remoteWrite.url` args.
 If a single remote storage instance temporarily is out of service, then the collected data remains available in another remote storage instances.
@@ -108,14 +108,14 @@ If a single remote storage instance temporarily is out of service, then the coll
 Then it sends the buffered data to the remote storage in order to prevent data gaps in the remote storage.
-#### Relabeling and filtering
+### Relabeling and filtering
 `vmagent` can add, remove or update labels on the collected data before sending it to remote storage. Additionally,
 it can remove unwanted samples via Prometheus-like relabeling before sending the collected data to remote storage.
 See [these docs](#relabeling) for details.
-#### Splitting data streams among multiple systems
+### Splitting data streams among multiple systems
 `vmagent` supports splitting the collected data between multiple destinations with the help of `-remoteWrite.urlRelabelConfig`,
 which is applied independently for each configured `-remoteWrite.url` destination. For instance, it is possible to replicate or split
 data among long-term remote storage, short-term remote storage and real-time analytical system [built on top of Kafka](https://github.com/Telefonica/prometheus-kafka-adapter).
 Note that each destination can receive its own subset of the collected data thanks to per-destination relabeling via `-remoteWrite.urlRelabelConfig`.
-#### Prometheus remote_write proxy
+### Prometheus remote_write proxy
 `vmagent` may be used as a proxy for Prometheus data sent via Prometheus `remote_write` protocol. It can accept data via `remote_write` API
 at `/api/v1/write` endpoint, apply relabeling and filtering and then proxy it to another `remote_write` systems.
 The `vmagent` can be configured to encrypt the incoming `remote_write` requests with `-tls*` command-line flags.
 Additionally, Basic Auth can be enabled for the incoming `remote_write` requests with `-httpAuth.*` command-line flags.
-#### remote_write for clustered version
+### remote_write for clustered version
 Although `vmagent` can accept data in several supported protocols (OpenTSDB, Influx, Prometheus, Graphite) and scrape data from various targets, writes are always performed in Prometheus remote_write protocol. Therefore for the clustered version the `-remoteWrite.url` command-line flag should be configured as `<schema>://<vminsert-host>:8480/insert/<accountID>/prometheus/api/v1/write`
-### How to collect metrics in Prometheus format
+## How to collect metrics in Prometheus format
 Pass the path to `prometheus.yml` to `-promscrape.config` command-line flag. `vmagent` takes into account the following
 sections from [Prometheus config file](https://prometheus.io/docs/prometheus/latest/configuration/configuration/):
@@ -192,7 +192,7 @@ entries to 60s. 
Run `vmagent -help` in order to see default values for `-promscr
 The file pointed by `-promscrape.config` may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.
-### Adding labels to metrics
+## Adding labels to metrics
 Labels can be added to metrics via the following mechanisms:
@@ -200,7 +200,7 @@ Labels can be added to metrics via the following mechanisms:
 * Via `-remoteWrite.label` command-line flag. These labels are added to all the collected metrics before sending them to `-remoteWrite.url`.
-### Relabeling
+## Relabeling
 `vmagent` supports [Prometheus relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config).
 Additionally it provides the following extra actions:
@@ -227,7 +227,7 @@ Read more about relabeling in the following articles:
 * [relabel_configs vs metric_relabel_configs](https://www.robustperception.io/relabel_configs-vs-metric_relabel_configs)
-### Monitoring
+## Monitoring
 `vmagent` exports various metrics in Prometheus exposition format at `http://vmagent-host:8429/metrics` page. It is recommended
 setting up regular scraping of this page either via `vmagent` itself or via Prometheus, so the exported metrics could be analyzed later.
@@ -246,7 +246,7 @@ This information may be useful for debugging target relabeling.
 It may be useful for performing `vmagent` rolling update without scrape loss.
-### Troubleshooting
+## Troubleshooting
 * It is recommended [setting up the official Grafana dashboard](#monitoring) in order to monitor `vmagent` state.
@@ -322,24 +322,24 @@ It may be useful for performing `vmagent` rolling update without scrape loss.
 ```
-### How to build from sources
+## How to build from sources
 It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmagent` is located in `vmutils-*` archives there.
-#### Development build
+### Development build
 1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
 2. Run `make vmagent` from the root folder of the repository. It builds `vmagent` binary and puts it into the `bin` folder.
-#### Production build
+### Production build
 1. [Install docker](https://docs.docker.com/install/).
 2. Run `make vmagent-prod` from the root folder of the repository. It builds `vmagent-prod` binary and puts it into the `bin` folder.
-#### Building docker images
+### Building docker images
 Run `make package-vmagent`. It builds `victoriametrics/vmagent:<PKG_TAG>` docker image locally.
 `<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository.
@@ -352,24 +352,24 @@ by setting it via `<ROOT_IMAGE>` environment variable. For example, the followin
 ROOT_IMAGE=scratch make package-vmagent
 ```
-#### ARM build
+### ARM build
 ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://blog.cloudflare.com/arm-takes-wing/).
-#### Development ARM build
+### Development ARM build
 1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
 2. Run `make vmagent-arm` or `make vmagent-arm64` from the root folder of the repository. It builds `vmagent-arm` or `vmagent-arm64` binary respectively and puts it into the `bin` folder.
-#### Production ARM build
+### Production ARM build
 1. [Install docker](https://docs.docker.com/install/).
 2. Run `make vmagent-arm-prod` or `make vmagent-arm64-prod` from the root folder of the repository. 
It builds `vmagent-arm-prod` or `vmagent-arm64-prod` binary respectively and puts it into the `bin` folder.
-### Profiling
+## Profiling
 `vmagent` provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):
diff --git a/docs/vmalert.md b/docs/vmalert.md
index aba8231d6..1d0e785e0 100644
--- a/docs/vmalert.md
+++ b/docs/vmalert.md
@@ -12,6 +12,7 @@ rules against configured address.
 support;
 * Integration with [Alertmanager](https://github.com/prometheus/alertmanager);
 * Keeps the alerts [state on restarts](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmalert#alerts-state-on-restarts);
+* Graphite datasource can be used for alerting and recording rules. See [these docs](#graphite) for details.
 * Lightweight without extra dependencies.
 ### Limitations:
@@ -105,7 +106,13 @@ The syntax for alerting rule is following:
 # The name of the alert. Must be a valid metric name.
 alert: <string>
-# The MetricsQL expression to evaluate.
+# Optional type for the rule. Supported values: "graphite", "prometheus".
+# By default "prometheus" rule type is used.
+[ type: <string> ]
+
+# The expression to evaluate. The expression language depends on the type value.
+# By default MetricsQL expression is used. If type="graphite", then the expression
+# must contain valid Graphite expression.
 expr: <string>
 # Alerts are considered firing once they have been returned for this long.
@@ -128,7 +135,13 @@ The syntax for recording rules is following:
 # The name of the time series to output to. Must be a valid metric name.
 record: <string>
-# The MetricsQL expression to evaluate.
+# Optional type for the rule. Supported values: "graphite", "prometheus".
+# By default "prometheus" rule type is used.
+[ type: <string> ]
+
+# The expression to evaluate. The expression language depends on the type value.
+# By default MetricsQL expression is used. If type="graphite", then the expression
+# must contain valid Graphite expression.
 expr: <string>
 # Labels to add or overwrite before storing the result.
@@ -166,6 +179,13 @@ Used as alert source in AlertManager.
 * `http://<vmalert-addr>/-/reload` - hot configuration reload.
+### Graphite
+
+vmalert sends requests to `<-datasource.url>/render?format=json` during evaluation of alerting and recording rules
+if the corresponding rule contains `type: "graphite"` config option. It is expected that the `<-datasource.url>/render`
+implements [Graphite Render API](https://graphite.readthedocs.io/en/stable/render_api.html) for `format=json`.
+
+
 ### Configuration
 The shortlist of configuration flags is the following:
diff --git a/docs/vmauth.md b/docs/vmauth.md
index 017a9278e..aeed7f1e2 100644
--- a/docs/vmauth.md
+++ b/docs/vmauth.md
@@ -5,7 +5,7 @@ It reads username and password from [Basic Auth headers](https://en.wikipedia.or
 matches them against configs pointed by `-auth.config` command-line flag and proxies incoming HTTP requests to the configured per-user `url_prefix` on successful match.
-### Quick start
+## Quick start
 Just download `vmutils-*` archive from [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), unpack it
 and pass the following flag to `vmauth` binary in order to start authorizing and routing requests:
@@ -26,7 +26,7 @@ Pass `-help` to `vmauth` in order to see all the supported command-line flags wi
 Feel free [contacting us](mailto:info@victoriametrics.com) if you need customized auth proxy for VictoriaMetrics with the support of LDAP, SSO, RBAC, SAML, accounting, limits, etc. 
-### Auth config
+## Auth config
 Auth config is represented in the following simple `yml` format:
@@ -68,7 +68,7 @@ The config may contain `%{ENV_VAR}` placeholders, which are substituted by the c
 This may be useful for passing secrets to the config.
-### Security
+## Security
 Do not transfer Basic Auth headers in plaintext over untrusted networks. Enable https. This can be done by passing the following `-tls*` command-line flags to `vmauth`:
@@ -84,30 +84,30 @@ Do not transfer Basic Auth headers in plaintext over untrusted networks. Enable
 Alternatively, [https termination proxy](https://en.wikipedia.org/wiki/TLS_termination_proxy) may be put in front of `vmauth`.
-### Monitoring
+## Monitoring
 `vmauth` exports various metrics in Prometheus exposition format at `http://vmauth-host:8427/metrics` page. It is recommended setting up regular scraping of this page
 either via [vmagent](https://victoriametrics.github.io/vmagent.html) or via Prometheus, so the exported metrics could be analyzed later.
-### How to build from sources
+## How to build from sources
 It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmauth` is located in `vmutils-*` archives there.
-#### Development build
+### Development build
 1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
 2. Run `make vmauth` from the root folder of the repository. It builds `vmauth` binary and puts it into the `bin` folder.
-#### Production build
+### Production build
 1. [Install docker](https://docs.docker.com/install/).
 2. Run `make vmauth-prod` from the root folder of the repository. It builds `vmauth-prod` binary and puts it into the `bin` folder.
-#### Building docker images
+### Building docker images
 Run `make package-vmauth`. It builds `victoriametrics/vmauth:<PKG_TAG>` docker image locally.
 `<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository.
@@ -121,7 +121,7 @@ ROOT_IMAGE=scratch make package-vmauth
 ```
-### Profiling
+## Profiling
 `vmauth` provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):
@@ -142,7 +142,7 @@ The command for collecting CPU profile waits for 30 seconds before returning.
 The collected profiles may be analyzed with [go tool pprof](https://github.com/google/pprof).
-### Advanced usage
+## Advanced usage
 Pass `-help` command-line arg to `vmauth` in order to see all the configuration options:
diff --git a/docs/vmbackup.md b/docs/vmbackup.md
index e3ae3f22b..8f62311c2 100644
--- a/docs/vmbackup.md
+++ b/docs/vmbackup.md
@@ -23,9 +23,9 @@ See also [vmbackuper](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/
 creation of hourly, daily, weekly and monthly backups.
-### Use cases
+## Use cases
-#### Regular backups
+### Regular backups
 Regular backup can be performed with the following command:
 ```
 vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshotName=<local-snapshot> -dst=gcs://<bucket>/<path/to/new/backup>
 ```
 * `<path/to/new/backup>` is the destination path where new backup will be placed.
-#### Regular backups with server-side copy from existing backup
+### Regular backups with server-side copy from existing backup
 If the destination GCS bucket already contains the previous backup at `-origin` path, then new backup can be sped up
 with the following command:
@@ -52,7 +52,7 @@ vmbackup -storageDataPath=</path/to/victoria-metrics-data> -snapshotName=<local-snapshot>
 Run `make package-vmbackup`. It builds `victoriametrics/vmbackup:<PKG_TAG>` docker image locally.
 `<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository. 
diff --git a/docs/vmctl.md b/docs/vmctl.md
new file mode 100644
index 000000000..52563a0f7
--- /dev/null
+++ b/docs/vmctl.md
@@ -0,0 +1,473 @@
+# vmctl - VictoriaMetrics command-line tool
+
+Features:
+- [x] Prometheus: migrate data from Prometheus to VictoriaMetrics using snapshot API
+- [x] Thanos: migrate data from Thanos to VictoriaMetrics
+- [ ] ~~Prometheus: migrate data from Prometheus to VictoriaMetrics by query~~(discarded)
+- [x] InfluxDB: migrate data from InfluxDB to VictoriaMetrics
+- [ ] Storage Management: data re-balancing between nodes
+
+# Table of contents
+
+* [Articles](#articles)
+* [How to build](#how-to-build)
+* [Migrating data from InfluxDB 1.x](#migrating-data-from-influxdb-1x)
+  * [Data mapping](#data-mapping)
+  * [Configuration](#configuration)
+  * [Filtering](#filtering)
+* [Migrating data from InfluxDB 2.x](#migrating-data-from-influxdb-2x)
+* [Migrating data from Prometheus](#migrating-data-from-prometheus)
+  * [Data mapping](#data-mapping-1)
+  * [Configuration](#configuration-1)
+  * [Filtering](#filtering-1)
+* [Migrating data from Thanos](#migrating-data-from-thanos)
+  * [Current data](#current-data)
+  * [Historical data](#historical-data)
+* [Migrating data from VictoriaMetrics](#migrating-data-from-victoriametrics)
+  * [Native protocol](#native-protocol)
+* [Tuning](#tuning)
+  * [Influx mode](#influx-mode)
+  * [Prometheus mode](#prometheus-mode)
+  * [VictoriaMetrics importer](#victoriametrics-importer)
+  * [Importer stats](#importer-stats)
+* [Significant figures](#significant-figures)
+* [Adding extra labels](#adding-extra-labels)
+
+
+## Articles
+
+* [How to migrate data from Prometheus](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-d44a6728f043)
+* [How to migrate data from Prometheus. Filtering and modifying time series](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-filtering-and-modifying-time-series-6d40cea4bf21)
+
+## How to build
+
+It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmctl` is located in `vmutils-*` archives there.
+
+
+### Development build
+
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
+2. Run `make vmctl` from the root folder of the repository.
+   It builds `vmctl` binary and puts it into the `bin` folder.
+
+### Production build
+
+1. [Install docker](https://docs.docker.com/install/).
+2. Run `make vmctl-prod` from the root folder of the repository.
+   It builds `vmctl-prod` binary and puts it into the `bin` folder.
+
+### Building docker images
+
+Run `make package-vmctl`. It builds `victoriametrics/vmctl:<PKG_TAG>` docker image locally.
+`<PKG_TAG>` is auto-generated image tag, which depends on source code in the repository.
+The `<PKG_TAG>` may be manually set via `PKG_TAG=foobar make package-vmctl`.
+
+The base docker image is [alpine](https://hub.docker.com/_/alpine) but it is possible to use any other base image
+by setting it via `<ROOT_IMAGE>` environment variable. For example, the following command builds the image on top of [scratch](https://hub.docker.com/_/scratch) image:
+
+```bash
+ROOT_IMAGE=scratch make package-vmctl
+```
+
+### ARM build
+
+ARM build may run on Raspberry Pi or on [energy-efficient ARM servers](https://blog.cloudflare.com/arm-takes-wing/).
+
+#### Development ARM build
+
+1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13.
+2. Run `make vmctl-arm` or `make vmctl-arm64` from the root folder of the repository. 
+ It builds `vmctl-arm` or `vmctl-arm64` binary respectively and puts it into the `bin` folder.
+
+#### Production ARM build
+
+1. [Install docker](https://docs.docker.com/install/).
+2. Run `make vmctl-arm-prod` or `make vmctl-arm64-prod` from the root folder of the repository.
+   It builds `vmctl-arm-prod` or `vmctl-arm64-prod` binary respectively and puts it into the `bin` folder.
+
+
+## Migrating data from InfluxDB (1.x)
+
+`vmctl` supports the `influx` mode to migrate data from InfluxDB to VictoriaMetrics time-series database.
+
+See `./vmctl influx --help` for details and full list of flags.
+
+To use the migration tool, please specify the InfluxDB address `--influx-addr`, the database `--influx-database` and the VictoriaMetrics address `--vm-addr`.
+The `--vm-addr` flag for single-node VM usually equals `--httpListenAddr`, and for the cluster version it equals the `--httpListenAddr` flag of the VMInsert component. Please note that vmctl performs an initial readiness check for the given address
+by checking the `/health` endpoint. For the cluster version it is additionally required to specify the `--vm-account-id` flag.
+See more details for cluster version [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
+
+As soon as the required flags are provided and all endpoints are accessible, `vmctl` will start the InfluxDB scheme exploration.
+Basically, it just fetches all fields and timeseries from the provided database and builds up a registry of all available timeseries.
+Then `vmctl` sends fetch requests for each timeseries to InfluxDB one by one and passes the results to the VM importer.
+The VM importer then accumulates received samples in batches and sends import requests to VM.
+
+The importing process example for a local installation of InfluxDB (`http://localhost:8086`)
+and single-node VictoriaMetrics (`http://localhost:8428`):
+```
+./vmctl influx --influx-database benchmark
+InfluxDB import mode
+2020/01/18 20:47:11 Exploring scheme for database "benchmark"
+2020/01/18 20:47:11 fetching fields: command: "show field keys"; database: "benchmark"; retention: "autogen"
+2020/01/18 20:47:11 found 10 fields
+2020/01/18 20:47:11 fetching series: command: "show series "; database: "benchmark"; retention: "autogen"
+Found 40000 timeseries to import. Continue? [Y/n] y
+40000 / 40000 [-----------------------------------------------------------------------------------------------------------------------------------------------] 100.00% 21 p/s
+2020/01/18 21:19:00 Import finished!
+2020/01/18 21:19:00 VictoriaMetrics importer stats:
+  idle duration: 13m51.461434876s;
+  time spent while importing: 17m56.923899847s;
+  total samples: 345600000;
+  samples/s: 320914.04;
+  total bytes: 5.9 GB;
+  bytes/s: 5.4 MB;
+  import requests: 40001;
+2020/01/18 21:19:00 Total time: 31m48.467044016s
+```
+
+### Data mapping
+
+Vmctl maps Influx data the same way as VictoriaMetrics does by using the following rules (see the sketch after this list):
+
+* `influx-database` arg is mapped into `db` label value unless `db` tag exists in the Influx line.
+* Field names are mapped to time series names prefixed with {measurement}{separator} value,
+where {separator} equals `_` by default.
+It can be changed with `--influx-measurement-field-separator` command-line flag.
+* Field values are mapped to time series values.
+* Tags are mapped to Prometheus labels format as-is. 
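+
+A minimal Go sketch of these mapping rules (a hypothetical illustration with made-up types; not vmctl's actual implementation):
+
+```go
+package main
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// influxPoint is a hypothetical parsed Influx line.
+type influxPoint struct {
+	Measurement string
+	Tags        map[string]string
+	Fields      map[string]float64
+}
+
+// toPromLines converts a point according to the rules above: each field
+// becomes a time series named {measurement}{separator}{field}, and tags
+// become Prometheus labels as-is.
+func toPromLines(p influxPoint, separator string) []string {
+	tagNames := make([]string, 0, len(p.Tags))
+	for k := range p.Tags {
+		tagNames = append(tagNames, k)
+	}
+	sort.Strings(tagNames) // deterministic label order for the sketch
+	pairs := make([]string, 0, len(tagNames))
+	for _, k := range tagNames {
+		pairs = append(pairs, fmt.Sprintf("%s=%q", k, p.Tags[k]))
+	}
+	labels := strings.Join(pairs, ", ")
+
+	fieldNames := make([]string, 0, len(p.Fields))
+	for f := range p.Fields {
+		fieldNames = append(fieldNames, f)
+	}
+	sort.Strings(fieldNames)
+	lines := make([]string, 0, len(fieldNames))
+	for _, f := range fieldNames {
+		lines = append(lines, fmt.Sprintf("%s%s%s{%s} %v", p.Measurement, separator, f, labels, p.Fields[f]))
+	}
+	return lines
+}
+
+func main() {
+	p := influxPoint{
+		Measurement: "foo",
+		Tags:        map[string]string{"tag1": "value1", "tag2": "value2"},
+		Fields:      map[string]float64{"field1": 12, "field2": 40},
+	}
+	for _, line := range toPromLines(p, "_") {
+		fmt.Println(line)
+	}
+}
+```
+
+Running the sketch prints exactly the two Prometheus-format data points shown in the example below.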
+
+For example, the following Influx line:
+```
+foo,tag1=value1,tag2=value2 field1=12,field2=40
+```
+
+is converted into the following Prometheus format data points:
+```
+foo_field1{tag1="value1", tag2="value2"} 12
+foo_field2{tag1="value1", tag2="value2"} 40
+```
+
+### Configuration
+
+The configuration flags should contain self-explanatory descriptions.
+
+### Filtering
+
+The filtering consists of two parts: by timeseries and by time.
+The first step is to select all available timeseries
+for the given database and retention. The user may specify an additional filtering
+condition via the `--influx-filter-series` flag. For example:
+```
+./vmctl influx --influx-database benchmark \
+  --influx-filter-series "on benchmark from cpu where hostname='host_1703'"
+InfluxDB import mode
+2020/01/26 14:23:29 Exploring scheme for database "benchmark"
+2020/01/26 14:23:29 fetching fields: command: "show field keys"; database: "benchmark"; retention: "autogen"
+2020/01/26 14:23:29 found 12 fields
+2020/01/26 14:23:29 fetching series: command: "show series on benchmark from cpu where hostname='host_1703'"; database: "benchmark"; retention: "autogen"
+Found 10 timeseries to import. Continue? [Y/n]
+```
+The timeseries select query would be the following:
+ `fetching series: command: "show series on benchmark from cpu where hostname='host_1703'"; database: "benchmark"; retention: "autogen"`
+
+The second step of filtering is a time filter, which applies when fetching the datapoints from Influx.
+Time filtering may be configured with two flags:
+* `--influx-filter-time-start`
+* `--influx-filter-time-end`
+Here's an example of importing timeseries for one day only:
+`./vmctl influx --influx-database benchmark --influx-filter-series "where hostname='host_1703'" --influx-filter-time-start "2020-01-01T10:07:00Z" --influx-filter-time-end "2020-01-01T15:07:00Z"`
+
+Please see more about time filtering [here](https://docs.influxdata.com/influxdb/v1.7/query_language/schema_exploration#filter-meta-queries-by-time).
+
+## Migrating data from InfluxDB (2.x)
+
+Migrating data from InfluxDB v2.x is not supported yet ([#32](https://github.com/VictoriaMetrics/vmctl/issues/32)).
+You may find a 3rd party solution for this useful - https://github.com/jonppe/influx_to_victoriametrics.
+
+
+## Migrating data from Prometheus
+
+`vmctl` supports the `prometheus` mode for migrating data from Prometheus to VictoriaMetrics time-series database.
+Migration is based on reading a Prometheus snapshot, which is basically a hard-link to Prometheus data files.
+
+See `./vmctl prometheus --help` for details and full list of flags.
+
+To use the migration tool, please specify the path to the Prometheus snapshot `--prom-snapshot` and the VictoriaMetrics address `--vm-addr`.
+More about Prometheus snapshots may be found [here](https://www.robustperception.io/taking-snapshots-of-prometheus-data).
+The `--vm-addr` flag for single-node VM usually equals `--httpListenAddr`, and for the cluster version it equals the `--httpListenAddr` flag of the VMInsert component. Please note that vmctl performs an initial readiness check for the given address
+by checking the `/health` endpoint. For the cluster version it is additionally required to specify the `--vm-account-id` flag.
+See more details for cluster version [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
+
+As soon as the required flags are provided and all endpoints are accessible, `vmctl` will start the Prometheus snapshot exploration. 
+Basically, it fetches all available blocks in the provided snapshot and reads their metadata. It also does initial filtering by time
+if the `--prom-filter-time-start` or `--prom-filter-time-end` flags were set. The exploration procedure prints some stats for the blocks it has read.
+Please note that the stats do not take timeseries or samples filtering into account; that is done during the importing process.
+
+The importing process takes the snapshot blocks revealed by the Explore procedure and processes them one by one,
+accumulating timeseries and samples. Please note that `vmctl` relies on the results of the Explore procedure at this stage,
+so ensure that exploration finished without errors or hitting limits. Please see this
+[issue](https://github.com/VictoriaMetrics/vmctl/issues/30) for details.
+The data is processed in chunks and then sent to VM.
+
+An example of the importing process for a local installation of Prometheus
+and single-node VictoriaMetrics (`http://localhost:8428`):
+```
+./vmctl prometheus --prom-snapshot=/path/to/snapshot \
+  --vm-concurrency=1 \
+  --vm-batch-size=200000 \
+  --prom-concurrency=3
+Prometheus import mode
+Prometheus snapshot stats:
+  blocks found: 14;
+  blocks skipped: 0;
+  min time: 1581288163058 (2020-02-09T22:42:43Z);
+  max time: 1582409128139 (2020-02-22T22:05:28Z);
+  samples: 32549106;
+  series: 27289.
+Found 14 blocks to import. Continue? [Y/n] y
+14 / 14 [-------------------------------------------------------------------------------------------] 100.00% 0 p/s
+2020/02/23 15:50:03 Import finished!
+2020/02/23 15:50:03 VictoriaMetrics importer stats:
+  idle duration: 6.152953029s;
+  time spent while importing: 44.908522491s;
+  total samples: 32549106;
+  samples/s: 724786.84;
+  total bytes: 669.1 MB;
+  bytes/s: 14.9 MB;
+  import requests: 323;
+  import requests retries: 0;
+2020/02/23 15:50:03 Total time: 51.077451066s
+```
+
+### Data mapping
+
+VictoriaMetrics has a data model very similar to Prometheus and supports [RemoteWrite integration](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage),
+so no data changes are applied.
+
+### Configuration
+
+The configuration flags contain self-explanatory descriptions.
+
+### Filtering
+
+The filtering consists of two parts: by timeseries and by time.
+
+Filtering by time may be configured via the `--prom-filter-time-start` and `--prom-filter-time-end` flags
+in RFC3339 format. This filter is applied twice: to drop blocks that are out of range and to filter timeseries in blocks with
+an overlapping time range.
+
+Example of applying a time filter:
+```
+./vmctl prometheus --prom-snapshot=/path/to/snapshot \
+  --prom-filter-time-start=2020-02-07T00:07:01Z \
+  --prom-filter-time-end=2020-02-11T00:07:01Z
+Prometheus import mode
+Prometheus snapshot stats:
+  blocks found: 2;
+  blocks skipped: 12;
+  min time: 1581288163058 (2020-02-09T22:42:43Z);
+  max time: 1581328800000 (2020-02-10T10:00:00Z);
+  samples: 1657698;
+  series: 3930.
+Found 2 blocks to import. Continue? [Y/n] y
+```
+
+Please note that the total number of blocks in the provided snapshot is 14, but only 2 of them fall into the provided
+time range, so the other 12 blocks were marked as `skipped`. The number of samples and series is not taken into account,
+since this is a heavy operation and is done during the import process.
+
+
+Filtering by timeseries is configured with the following flags:
+* `--prom-filter-label` - the label name, e.g. `__name__` or `instance`;
+* `--prom-filter-label-value` - the regular expression to filter the label value.
By default it matches all values (`.*`).
+
+For example:
+```
+./vmctl prometheus --prom-snapshot=/path/to/snapshot \
+  --prom-filter-label="__name__" \
+  --prom-filter-label-value="promhttp.*" \
+  --prom-filter-time-start=2020-02-07T00:07:01Z \
+  --prom-filter-time-end=2020-02-11T00:07:01Z
+Prometheus import mode
+Prometheus snapshot stats:
+  blocks found: 2;
+  blocks skipped: 12;
+  min time: 1581288163058 (2020-02-09T22:42:43Z);
+  max time: 1581328800000 (2020-02-10T10:00:00Z);
+  samples: 1657698;
+  series: 3930.
+Found 2 blocks to import. Continue? [Y/n] y
+14 / 14 [------------------------------------------------------------------------------------------------------------------------------------------------------] 100.00% ? p/s
+2020/02/23 15:51:07 Import finished!
+2020/02/23 15:51:07 VictoriaMetrics importer stats:
+  idle duration: 0s;
+  time spent while importing: 37.415461ms;
+  total samples: 10128;
+  samples/s: 270690.24;
+  total bytes: 195.2 kB;
+  bytes/s: 5.2 MB;
+  import requests: 2;
+  import requests retries: 0;
+2020/02/23 15:51:07 Total time: 7.153158218s
+```
+
+## Migrating data from Thanos
+
+Thanos uses the same storage engine as Prometheus, and the data layout on disk should be the same. That means
+`vmctl` in `prometheus` mode may be used for Thanos historical data migration as well.
+These instructions may vary based on the details of your Thanos configuration.
+Please read carefully and verify as you go. We assume you're using Thanos Sidecar on your Prometheus pods,
+and that you have a separate Thanos Store installation.
+
+### Current data
+
+1. For now, keep your Thanos Sidecar and Thanos-related Prometheus configuration, but add this to also stream
+   metrics to VictoriaMetrics:
+   ```
+   remote_write:
+   - url: http://victoria-metrics:8428/api/v1/write
+   ```
+2. Make sure VM is running. Now check the logs to make sure that Prometheus is sending and VM is receiving.
+   In Prometheus, make sure there are no errors. On the VM side, you should see messages like this:
+   ```
+   2020-04-27T18:38:46.474Z info VictoriaMetrics/lib/storage/partition.go:207 creating a partition "2020_04" with smallPartsPath="/victoria-metrics-data/data/small/2020_04", bigPartsPath="/victoria-metrics-data/data/big/2020_04"
+   2020-04-27T18:38:46.506Z info VictoriaMetrics/lib/storage/partition.go:222 partition "2020_04" has been created
+   ```
+3. Now just wait. Within two hours, Prometheus should finish its current data file and hand it off to Thanos Store for long-term
+   storage.
+
+### Historical data
+
+Let's assume your data is stored on S3 served by minio. You first need to copy that data to a local filesystem,
+then import it into VM using `vmctl` in `prometheus` mode.
+1. Copy data from minio.
+    1. Run the `minio/mc` Docker container.
+    1. `mc config host add minio http://minio:9000 accessKey secretKey`, substituting appropriate values for the last 3 items.
+    1. `mc cp -r minio/prometheus thanos-data`
+1. Import using `vmctl`.
+    1. Follow the [instructions](#how-to-build) to compile `vmctl` on your machine.
+    1. 
Use [prometheus](#migrating-data-from-prometheus) mode to import data:
+    ```
+    vmctl prometheus --prom-snapshot thanos-data --vm-addr http://victoria-metrics:8428
+    ```
+
+## Migrating data from VictoriaMetrics
+
+### Native protocol
+
+The [native binary protocol](https://victoriametrics.github.io/#how-to-export-data-in-native-format)
+was introduced in the [1.42.0 release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.42.0)
+and provides the most efficient way to migrate data between VM instances: single to single, cluster to cluster,
+single to cluster and vice versa. Please note that both instances (source and destination) should be of v1.42.0
+or higher.
+
+See `./vmctl vm-native --help` for details and the full list of flags.
+
+In this mode `vmctl` acts as a proxy between two VM instances, where time series filtering is done by the "source" (`src`)
+and processing is done by the "destination" (`dst`). Because of that, `vmctl` doesn't actually know how much data will be
+processed and can't show a progress bar. It will show the current processing speed and the total number of processed bytes:
+
+```
+./vmctl vm-native --vm-native-src-addr=http://localhost:8528 \
+  --vm-native-dst-addr=http://localhost:8428 \
+  --vm-native-filter-match='{job="vmagent"}' \
+  --vm-native-filter-time-start='2020-01-01T20:07:00Z'
+VictoriaMetrics Native import mode
+Initing export pipe from "http://localhost:8528" with filters:
+	filter: match[]={job="vmagent"}
+Initing import process to "http://localhost:8428":
+Total: 336.75 KiB ↖ Speed: 454.46 KiB p/s
+2020/10/13 17:04:59 Total time: 952.143376ms
+```
+
+Importing tips:
+1. Migrating all the metrics from one VM to another may collide with existing application metrics
+(prefixed with `vm_`) at the destination and lead to confusion when using
+[official Grafana dashboards](https://grafana.com/orgs/victoriametrics/dashboards).
+To avoid such a situation, try to filter out VM process metrics via the `--vm-native-filter-match` flag.
+2. Migration is a backfilling process, so it is recommended to read the
+[Backfilling tips](https://github.com/VictoriaMetrics/VictoriaMetrics#backfilling) section.
+3. `vmctl` doesn't provide relabeling or other types of label management in this mode.
+Instead, use [relabeling in VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl/issues/4#issuecomment-683424375).
+
+
+## Tuning
+
+### Influx mode
+
+The flag `--influx-concurrency` controls how many concurrent requests may be sent to InfluxDB while fetching
+timeseries. Please set it wisely to avoid overwhelming InfluxDB.
+
+The flag `--influx-chunk-size` controls the maximum number of datapoints to return in a single chunk from fetch requests.
+Please see more details [here](https://docs.influxdata.com/influxdb/v1.7/guides/querying_data/#chunking).
+The chunk size is used to control InfluxDB memory usage, so it won't OOM while processing large timeseries with
+billions of datapoints.
+
+### Prometheus mode
+
+The flag `--prom-concurrency` controls how many concurrent readers will be reading the blocks in the snapshot.
+Since snapshots are just files on disk, it is hard to overwhelm the system. A value equal to the number
+of free CPU cores is a good starting point.
+
+### VictoriaMetrics importer
+
+The flag `--vm-concurrency` controls the number of concurrent workers that process the input from the InfluxDB query results.
+Please note that each import request can load up to a single vCPU core on VictoriaMetrics, so try to set it according
+to the allocated CPU resources of your VictoriaMetrics installation.
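+
+As an illustration only, the tuning flags described above can be combined in a single run. The following
+invocation is a hypothetical sketch - the database name and all values are made up and should be adjusted
+to the available resources:
+```
+./vmctl influx --influx-database benchmark \
+  --influx-concurrency=4 \
+  --influx-chunk-size=10000 \
+  --vm-concurrency=2
+```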
+
+The flag `--vm-batch-size` controls the maximum number of samples collected before sending the import request.
+For example, if `--influx-chunk-size=500` and `--vm-batch-size=2000`, then the importer will process no more
+than 4 chunks before sending the request.
+
+### Importer stats
+
+After a successful import, `vmctl` prints some statistics.
+The important numbers to watch are the following:
+ - `idle duration` - shows the time the importer spent waiting for data from InfluxDB/Prometheus
+to fill up the `--vm-batch-size` batch size. The value shows the total duration across all workers configured
+via `--vm-concurrency`. A high value may be a sign of too-slow InfluxDB/Prometheus fetches or a too-high
+`--vm-concurrency` value. Try to improve it by increasing the `--influx-concurrency`/`--prom-concurrency` value or
+decreasing the `--vm-concurrency` value.
+- `import requests` - shows how many import requests were issued to the VM server.
+An import request is issued once the batch size (`--vm-batch-size`) is full and ready to be sent.
+Prefer big batch sizes (50k-500k) to improve performance.
+- `import requests retries` - shows the number of unsuccessful import requests. A non-zero value may be
+a sign of network issues or of VM being overloaded. See the logs during import for error messages.
+
+### Silent mode
+
+By default `vmctl` waits for confirmation from the user before starting the import. If this behavior is unwanted
+and no user interaction is required, pass the `-s` flag to enable "silent" mode:
+```
+  -s	Whether to run in silent mode. If set to true no confirmation prompts will appear. (default: false)
+```
+
+### Significant figures
+
+`vmctl` allows limiting the number of [significant figures](https://en.wikipedia.org/wiki/Significant_figures)
+before importing. For example, the average value for response size is `102.342305` bytes and it has 9 significant figures.
+If you ask a human to pronounce this value, it will most likely be rounded to the first 4 or 5 figures,
+because the rest aren't really that important to mention. In most cases, such high precision is too much.
+Moreover, such values may be just a result of [floating point arithmetic](https://en.wikipedia.org/wiki/Floating-point_arithmetic),
+create a [false precision](https://en.wikipedia.org/wiki/False_precision) and result in a bad compression ratio
+according to [information theory](https://en.wikipedia.org/wiki/Information_theory).
+
+`vmctl` provides the following flags for improving data compression:
+
+* `--vm-round-digits` flag for rounding processed values to the given number of decimal digits after the point.
+  For example, `--vm-round-digits=2` would round `1.2345` to `1.23`. By default the rounding is disabled.
+
+* `--vm-significant-figures` flag for limiting the number of significant figures in processed values. It has no effect if set
+  to 0 (the default), but with `--vm-significant-figures=5`, `102.342305` will be rounded to `102.34`.
+
+The most common case for using these flags is to improve data compression for time series storing aggregation
+results such as `average`, `rate`, etc.
+
+### Adding extra labels
+
+`vmctl` allows adding extra labels to all imported series. This can be achieved with the `--vm-extra-label label=value` flag.
+If multiple labels need to be added, set the flag once per label, for example, `--vm-extra-label label1=value1 --vm-extra-label label2=value2`.
+If a timeseries already has a label that is also set via the `--vm-extra-label` flag, the flag takes priority and overrides the label value from the timeseries.
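+
+For example, the following hypothetical invocation (the label names and values are purely illustrative)
+would mark every imported series with its origin:
+```
+./vmctl prometheus --prom-snapshot=/path/to/snapshot \
+  --vm-extra-label source=prometheus \
+  --vm-extra-label env=staging
+```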
+ diff --git a/docs/vmrestore.md b/docs/vmrestore.md index 6750ed14f..b18177f39 100644 --- a/docs/vmrestore.md +++ b/docs/vmrestore.md @@ -7,7 +7,7 @@ Restore process can be interrupted at any time. It is automatically resumed from when restarting `vmrestore` with the same args. -### Usage +## Usage VictoriaMetrics must be stopped during the restore process. @@ -25,13 +25,13 @@ The original `-storageDataPath` directory may contain old files. They will be su i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/questions/476041/how-do-i-make-rsync-delete-files-that-have-been-deleted-from-the-source-folder). -### Troubleshooting +## Troubleshooting * If `vmrestore` eats all the network bandwidth, then set `-maxBytesPerSecond` to the desired value. * If `vmrestore` has been interrupted due to temporary error, then just restart it with the same args. It will resume the restore process. -### Advanced usage +## Advanced usage * Obtaining credentials from a file. @@ -118,24 +118,24 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q ``` -### How to build from sources +## How to build from sources It is recommended using [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - see `vmutils-*` archives there. -#### Development build +### Development build 1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.13. 2. Run `make vmrestore` from the root folder of the repository. It builds `vmrestore` binary and puts it into the `bin` folder. -#### Production build +### Production build 1. [Install docker](https://docs.docker.com/install/). 2. Run `make vmrestore-prod` from the root folder of the repository. It builds `vmrestore-prod` binary and puts it into the `bin` folder. -#### Building docker images +### Building docker images Run `make package-vmrestore`. It builds `victoriametrics/vmrestore:` docker image locally. `` is auto-generated image tag, which depends on source code in the repository. 
diff --git a/go.mod b/go.mod index 120fb499f..2e1453e3f 100644 --- a/go.mod +++ b/go.mod @@ -10,29 +10,39 @@ require ( github.com/VictoriaMetrics/fasthttp v1.0.12 github.com/VictoriaMetrics/metrics v1.12.3 github.com/VictoriaMetrics/metricsql v0.10.0 - github.com/aws/aws-sdk-go v1.36.25 + github.com/aws/aws-sdk-go v1.37.1 github.com/cespare/xxhash/v2 v2.1.1 + github.com/cheggaaa/pb/v3 v3.0.5 + github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect + github.com/fatih/color v1.10.0 // indirect github.com/go-kit/kit v0.10.0 github.com/golang/snappy v0.0.2 - github.com/klauspost/compress v1.11.6 + github.com/influxdata/influxdb v1.8.4 + github.com/klauspost/compress v1.11.7 + github.com/mattn/go-runewidth v0.0.10 // indirect github.com/oklog/ulid v1.3.1 github.com/prometheus/client_golang v1.9.0 // indirect github.com/prometheus/procfs v0.3.0 // indirect github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9 + github.com/rivo/uniseg v0.2.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/urfave/cli/v2 v2.3.0 github.com/valyala/fastjson v1.6.3 github.com/valyala/fastrand v1.0.0 github.com/valyala/fasttemplate v1.2.1 github.com/valyala/gozstd v1.9.0 github.com/valyala/histogram v1.1.2 github.com/valyala/quicktemplate v1.6.3 - golang.org/x/oauth2 v0.0.0-20210112200429-01de73cf58bd - golang.org/x/sync v0.0.0-20201207232520-09787c993a3a // indirect - golang.org/x/sys v0.0.0-20210113000019-eaf3bda374d2 + go.opencensus.io v0.22.6 // indirect + golang.org/x/mod v0.4.1 // indirect + golang.org/x/net v0.0.0-20210119194325-5f4716e94777 // indirect + golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c golang.org/x/text v0.3.5 // indirect - golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064 // indirect - google.golang.org/api v0.36.0 - google.golang.org/genproto v0.0.0-20210111234610-22ae2b108f89 // indirect - google.golang.org/grpc v1.34.1 // indirect + golang.org/x/tools v0.1.0 // indirect + google.golang.org/api v0.38.0 + google.golang.org/genproto v0.0.0-20210201151548-94839c025ad4 // indirect + google.golang.org/grpc v1.35.0 // indirect gopkg.in/yaml.v2 v2.4.0 ) diff --git a/go.sum b/go.sum index f5fb720ae..406d418ab 100644 --- a/go.sum +++ b/go.sum @@ -17,6 +17,7 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.66.0/go.mod h1:dgqGAjKCDxyhGTtC9dAREQGUJpkceNm1yt590Qno0Ko= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0 h1:XgtDnVJRCPEUG21gjFiRPz4zI1Mjg16R+NYQjfmU4XY= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -88,6 +89,8 @@ github.com/VictoriaMetrics/metrics v1.12.3 h1:Fe6JHC6MSEKa+BtLhPN8WIvS+HKPzMc2ev github.com/VictoriaMetrics/metrics v1.12.3/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE= github.com/VictoriaMetrics/metricsql v0.10.0 h1:45BARAP2shaL/5p67Hvz+YrWUbr0X0VCy9t+gvdIm8o= github.com/VictoriaMetrics/metricsql v0.10.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8= +github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= +github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= 
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= @@ -120,8 +123,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.36.25 h1:foHwQg8LGGuR9L8IODs2co5OQqjYhNNrngefIbXbyjg= -github.com/aws/aws-sdk-go v1.36.25/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.37.1 h1:BTHmuN+gzhxkvU9sac2tZvaY0gV9ihbHw+KxZOecYvY= +github.com/aws/aws-sdk-go v1.37.1/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -139,6 +142,8 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cheggaaa/pb/v3 v3.0.5 h1:lmZOti7CraK9RSjzExsY53+WWfub9Qv13B5m4ptEoPE= +github.com/cheggaaa/pb/v3 v3.0.5/go.mod h1:X1L61/+36nz9bjIsrDU52qHKOQukUQe2Ge+YvGuquCw= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -148,6 +153,7 @@ github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= @@ -155,6 +161,8 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0 
h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= @@ -185,10 +193,13 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= +github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= @@ -368,6 +379,7 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= @@ -388,6 +400,7 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201117184057-ae444373da19/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -451,6 +464,8 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/inconshreveable/mousetrap 
v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= +github.com/influxdata/influxdb v1.8.4 h1:FUcPJJ1/sM47gX3Xr7QCIbkmZ2N4ug5CV7iOFQCR75A= +github.com/influxdata/influxdb v1.8.4/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= @@ -492,8 +507,8 @@ github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.6 h1:EgWPCW6O3n1D5n99Zq3xXBt9uCwRGvpwGOusOLNBRSQ= -github.com/klauspost/compress v1.11.6/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -525,16 +540,23 @@ github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod 
h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg= +github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= @@ -664,6 +686,9 @@ github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9 h1:F2A86PG github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9/go.mod h1:1MDE/bXgu4gqd5w/otko6WQpXZX9vu8QX4KbitCmaPg= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -671,6 +696,8 @@ github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= @@ -721,7 +748,10 @@ github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqri github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.16.0/go.mod 
h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= @@ -764,8 +794,9 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.22.6 h1:BdkrbWrzDlV9dnbzoP7sfN+dHheJ4J9JOaYxcUDL+ok= +go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= @@ -831,8 +862,9 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0 h1:8pl+sMODzuvGJkmj2W4kZihvVb5mKm8pB/X44PIQHv8= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -876,8 +908,10 @@ golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -886,8 +920,8 @@ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 
v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210112200429-01de73cf58bd h1:0n2rzLq6xLtV9OFaT0BF2syUkjOwRrJ1zvXY5hH7Kkc= -golang.org/x/oauth2 v0.0.0-20210112200429-01de73cf58bd/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c h1:HiAZXo96zOhVhtFHchj/ojzoxCFiPrp9/j0GtS38V3g= +golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -963,8 +997,10 @@ golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210113000019-eaf3bda374d2 h1:F9vNgpIiamoF+Q1/c78bikg/NScXEtbZSNEpnRelOzs= -golang.org/x/sys v0.0.0-20210113000019-eaf3bda374d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1048,9 +1084,11 @@ golang.org/x/tools v0.0.0-20200918232735-d647fc253266/go.mod h1:z6u4i615ZeAfBE4X golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201119054027-25dc3e1ccc3c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064 h1:BmCFkEH4nJrYcAc2L08yX5RhYGD4j58PTMkEUDkpz2I= -golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1082,8 +1120,9 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.31.0/go.mod h1:CL+9IBCa2WWU6gRuBWaKqGWLFFwbEUXkfeMkHLQWYWo= google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0 h1:l2Nfbl2GPXdWorv+dT2XfinX2jOOw4zv1VhLstx+6rE= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.38.0 h1:vDyWk6eup8eQAidaZ31sNWIn8tZEL8qpbtGkBD4ytQo= +google.golang.org/api v0.38.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1132,9 +1171,11 @@ google.golang.org/genproto v0.0.0-20200914193844-75d14daec038/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200921151605-7abf4a1a14d5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210111234610-22ae2b108f89 h1:R2owLnwrU3BdTJ5R9cnHDNsnEmBQ7n5lZjKShnbISe4= -google.golang.org/genproto v0.0.0-20210111234610-22ae2b108f89/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210201151548-94839c025ad4 h1:HPkKL4eEh/nemF/FRzYMrFsAh1ZPm5t8NqKBI/Ejlg0= +google.golang.org/genproto v0.0.0-20210201151548-94839c025ad4/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1157,8 +1198,8 @@ google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.34.1 h1:ugq+9++ZQPFzM2pKUMCIK8gj9M0pFyuUWO9Q8kwEDQw= -google.golang.org/grpc v1.34.1/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0 h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/lib/decimal/decimal.go b/lib/decimal/decimal.go index 79ea761bb..84304fefe 100644 --- a/lib/decimal/decimal.go +++ b/lib/decimal/decimal.go @@ -298,8 +298,21 @@ func maxUpExponent(v int64) int16 { } } -// Round f to value with the given number of significant figures. -func Round(f float64, digits int) float64 { +// RoundToDecimalDigits rounds f to the given number of decimal digits after the point. +// +// See also RoundToSignificantFigures. +func RoundToDecimalDigits(f float64, digits int) float64 { + if digits <= -100 || digits >= 100 { + return f + } + m := math.Pow10(digits) + return math.Round(f*m) / m +} + +// RoundToSignificantFigures rounds f to value with the given number of significant figures. +// +// See also RoundToDecimalDigits. +func RoundToSignificantFigures(f float64, digits int) float64 { if digits <= 0 || digits >= 18 { return f } diff --git a/lib/decimal/decimal_test.go b/lib/decimal/decimal_test.go index 153e0ff69..f6a94ea97 100644 --- a/lib/decimal/decimal_test.go +++ b/lib/decimal/decimal_test.go @@ -7,10 +7,34 @@ import ( "testing" ) -func TestRoundInplace(t *testing.T) { +func TestRoundToDecimalDigits(t *testing.T) { f := func(f float64, digits int, resultExpected float64) { t.Helper() - result := Round(f, digits) + result := RoundToDecimalDigits(f, digits) + if math.IsNaN(result) { + if !math.IsNaN(resultExpected) { + t.Fatalf("unexpected result; got %v; want %v", result, resultExpected) + } + } + if result != resultExpected { + t.Fatalf("unexpected result; got %v; want %v", result, resultExpected) + } + } + f(12.34, 0, 12) + f(12.57, 0, 13) + f(-1.578, 2, -1.58) + f(-1.578, 3, -1.578) + f(1234, -2, 1200) + f(1235, -1, 1240) + f(1234, 0, 1234) + f(1234.6, 0, 1235) + f(123.4e-99, 99, 123e-99) +} + +func TestRoundToSignificantFigures(t *testing.T) { + f := func(f float64, digits int, resultExpected float64) { + t.Helper() + result := RoundToSignificantFigures(f, digits) if math.IsNaN(result) { if !math.IsNaN(resultExpected) { t.Fatalf("unexpected result; got %v; want %v", result, resultExpected) diff --git a/lib/flagutil/array.go b/lib/flagutil/array.go index 51f7d2f5b..27ba3a540 100644 --- a/lib/flagutil/array.go +++ b/lib/flagutil/array.go @@ -35,6 +35,15 @@ func NewArrayBool(name, description string) *ArrayBool { return &a } +// NewArrayInt returns new ArrayInt with the given name and description. +func NewArrayInt(name string, description string) *ArrayInt { + description += "\nSupports `array` of values separated by comma" + + " or specified via multiple flags." + var a ArrayInt + flag.Var(&a, name, description) + return &a +} + // Array is a flag that holds an array of values. // // It may be set either by specifying multiple flags with the given name @@ -223,3 +232,41 @@ func (a *ArrayDuration) GetOptionalArgOrDefault(argIdx int, defaultValue time.Du } return x[argIdx] } + +// ArrayInt is flag that holds an array of ints. 
+type ArrayInt []int + +// String implements flag.Value interface +func (a *ArrayInt) String() string { + x := *a + formattedInts := make([]string, len(x)) + for i, v := range x { + formattedInts[i] = strconv.Itoa(v) + } + return strings.Join(formattedInts, ",") +} + +// Set implements flag.Value interface +func (a *ArrayInt) Set(value string) error { + values := parseArrayValues(value) + for _, v := range values { + n, err := strconv.Atoi(v) + if err != nil { + return err + } + *a = append(*a, n) + } + return nil +} + +// GetOptionalArgOrDefault returns optional arg under the given argIdx. +func (a *ArrayInt) GetOptionalArgOrDefault(argIdx int, defaultValue int) int { + x := *a + if argIdx < len(x) { + return x[argIdx] + } + if len(x) == 1 { + return x[0] + } + return defaultValue +} diff --git a/lib/flagutil/array_test.go b/lib/flagutil/array_test.go index 7e27ab2ff..b6675ebd8 100644 --- a/lib/flagutil/array_test.go +++ b/lib/flagutil/array_test.go @@ -12,14 +12,18 @@ var ( fooFlag Array fooFlagDuration ArrayDuration fooFlagBool ArrayBool + fooFlagInt ArrayInt ) func init() { - os.Args = append(os.Args, "--fooFlag=foo", "--fooFlag=bar", "--fooFlagDuration=10s", "--fooFlagDuration=5m") + os.Args = append(os.Args, "--fooFlag=foo", "--fooFlag=bar") + os.Args = append(os.Args, "--fooFlagDuration=10s", "--fooFlagDuration=5m") os.Args = append(os.Args, "--fooFlagBool=true", "--fooFlagBool=false,true", "--fooFlagBool") + os.Args = append(os.Args, "--fooFlagInt=1", "--fooFlagInt=2,3") flag.Var(&fooFlag, "fooFlag", "test") flag.Var(&fooFlagDuration, "fooFlagDuration", "test") flag.Var(&fooFlagBool, "fooFlagBool", "test") + flag.Var(&fooFlagInt, "fooFlagInt", "test") } func TestMain(m *testing.M) { @@ -28,16 +32,16 @@ func TestMain(m *testing.M) { } func TestArray(t *testing.T) { - expected := map[string]struct{}{ - "foo": {}, - "bar": {}, + expected := []string{ + "foo", + "bar", } if len(expected) != len(fooFlag) { t.Errorf("len array flag (%d) is not equal to %d", len(fooFlag), len(expected)) } - for _, i := range fooFlag { - if _, ok := expected[i]; !ok { - t.Errorf("unexpected item in array %v", i) + for i, v := range fooFlag { + if v != expected[i] { + t.Errorf("unexpected item in array %q", v) } } } @@ -101,16 +105,16 @@ func TestArrayString(t *testing.T) { } func TestArrayDuration(t *testing.T) { - expected := map[time.Duration]struct{}{ - time.Second * 10: {}, - time.Minute * 5: {}, + expected := []time.Duration{ + time.Second * 10, + time.Minute * 5, } if len(expected) != len(fooFlagDuration) { t.Errorf("len array flag (%d) is not equal to %d", len(fooFlag), len(expected)) } - for _, i := range fooFlagDuration { - if _, ok := expected[i]; !ok { - t.Errorf("unexpected item in array %v", i) + for i, v := range fooFlagDuration { + if v != expected[i] { + t.Errorf("unexpected item in array %s", v) } } } @@ -130,7 +134,7 @@ func TestArrayDurationSet(t *testing.T) { } func TestArrayDurationGetOptionalArg(t *testing.T) { - f := func(s string, argIdx int, expectedValue time.Duration, defaultValue time.Duration) { + f := func(s string, argIdx int, expectedValue, defaultValue time.Duration) { t.Helper() var a ArrayDuration _ = a.Set(s) @@ -219,3 +223,60 @@ func TestArrayBoolString(t *testing.T) { f("true,false") f("false,true") } + +func TestArrayInt(t *testing.T) { + expected := []int{1, 2, 3} + if len(expected) != len(fooFlagInt) { + t.Errorf("len array flag (%d) is not equal to %d", len(fooFlag), len(expected)) + } + for i, n := range fooFlagInt { + if n != expected[i] { + t.Errorf("unexpected 
item in array %d", n) + } + } +} + +func TestArrayIntSet(t *testing.T) { + f := func(s string, expectedValues []int) { + t.Helper() + var a ArrayInt + _ = a.Set(s) + if !reflect.DeepEqual([]int(a), expectedValues) { + t.Fatalf("unexpected values parsed;\ngot\n%q\nwant\n%q", a, expectedValues) + } + } + f("", nil) + f(`1`, []int{1}) + f(`-2,3,-64`, []int{-2, 3, -64}) +} + +func TestArrayIntGetOptionalArg(t *testing.T) { + f := func(s string, argIdx int, expectedValue, defaultValue int) { + t.Helper() + var a ArrayInt + _ = a.Set(s) + v := a.GetOptionalArgOrDefault(argIdx, defaultValue) + if v != expectedValue { + t.Fatalf("unexpected value; got %d; want %d", v, expectedValue) + } + } + f("", 0, 123, 123) + f("", 1, -34, -34) + f("10,1", 1, 1, 234) + f("10", 3, 10, -34) +} + +func TestArrayIntString(t *testing.T) { + f := func(s string) { + t.Helper() + var a ArrayInt + _ = a.Set(s) + result := a.String() + if result != s { + t.Fatalf("unexpected string;\ngot\n%s\nwant\n%s", result, s) + } + } + f("") + f("10,1") + f("-5,1,123") +} diff --git a/lib/promscrape/client.go b/lib/promscrape/client.go index 03cbcf193..05a12f12f 100644 --- a/lib/promscrape/client.go +++ b/lib/promscrape/client.go @@ -242,6 +242,7 @@ var ( scrapesOK = metrics.NewCounter(`vm_promscrape_scrapes_total{status_code="200"}`) scrapesGunzipped = metrics.NewCounter(`vm_promscrape_scrapes_gunziped_total`) scrapesGunzipFailed = metrics.NewCounter(`vm_promscrape_scrapes_gunzip_failed_total`) + scrapeRetries = metrics.NewCounter(`vm_promscrape_scrape_retries_total`) ) func doRequestWithPossibleRetry(hc *fasthttp.HostClient, req *fasthttp.Request, resp *fasthttp.Response, deadline time.Time) error { @@ -259,6 +260,7 @@ func doRequestWithPossibleRetry(hc *fasthttp.HostClient, req *fasthttp.Request, if time.Since(deadline) >= 0 { return fmt.Errorf("the server closes all the connection attempts: %w", err) } + scrapeRetries.Inc() } } diff --git a/lib/promscrape/discoveryutils/client.go b/lib/promscrape/discoveryutils/client.go index 0acdace38..65b514d63 100644 --- a/lib/promscrape/discoveryutils/client.go +++ b/lib/promscrape/discoveryutils/client.go @@ -14,6 +14,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy" "github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool" "github.com/VictoriaMetrics/fasthttp" + "github.com/VictoriaMetrics/metrics" ) var ( @@ -192,6 +193,7 @@ func (c *Client) getAPIResponseWithParamsAndClient(client *fasthttp.HostClient, } func doRequestWithPossibleRetry(hc *fasthttp.HostClient, req *fasthttp.Request, resp *fasthttp.Response, deadline time.Time) error { + discoveryRequests.Inc() for { // Use DoDeadline instead of Do even if hc.ReadTimeout is already set in order to guarantee the given deadline // across multiple retries. @@ -206,5 +208,11 @@ func doRequestWithPossibleRetry(hc *fasthttp.HostClient, req *fasthttp.Request, if time.Since(deadline) >= 0 { return fmt.Errorf("the server closes all the connection attempts: %w", err) } + discoveryRetries.Inc() } } + +var ( + discoveryRetries = metrics.NewCounter(`vm_promscrape_discovery_retries_total`) + discoveryRequests = metrics.NewCounter(`vm_promscrape_discovery_requests_total`) +) diff --git a/vendor/github.com/VividCortex/ewma/.gitignore b/vendor/github.com/VividCortex/ewma/.gitignore new file mode 100644 index 000000000..6c7104aef --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/.gitignore @@ -0,0 +1,2 @@ +.DS_Store +.*.sw? 
diff --git a/vendor/github.com/VividCortex/ewma/LICENSE b/vendor/github.com/VividCortex/ewma/LICENSE new file mode 100644 index 000000000..a78d643ed --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2013 VividCortex + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/VividCortex/ewma/README.md b/vendor/github.com/VividCortex/ewma/README.md new file mode 100644 index 000000000..7aab61b87 --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/README.md @@ -0,0 +1,140 @@ +# EWMA [![GoDoc](https://godoc.org/github.com/VividCortex/ewma?status.svg)](https://godoc.org/github.com/VividCortex/ewma) ![Build Status](https://circleci.com/gh/VividCortex/moving_average.png?circle-token=1459fa37f9ca0e50cef05d1963146d96d47ea523) + +This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our +Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/). + +### Exponentially Weighted Moving Average + +An exponentially weighted moving average is a way to continuously compute a type of +average for a series of numbers, as the numbers arrive. After a value in the series is +added to the average, its weight in the average decreases exponentially over time. This +biases the average towards more recent data. EWMAs are useful for several reasons, chiefly +their inexpensive computational and memory cost, as well as the fact that they represent +the recent central tendency of the series of values. + +The EWMA algorithm requires a decay factor, alpha. The larger the alpha, the more the average +is biased towards recent history. The alpha must be between 0 and 1, and is typically +a fairly small number, such as 0.04. We will discuss the choice of alpha later. + +The algorithm works thus, in pseudocode: + +1. Multiply the next number in the series by alpha. +2. Multiply the current value of the average by 1 minus alpha. +3. Add the result of steps 1 and 2, and store it as the new current value of the average. +4. Repeat for each number in the series. + +There are special-case behaviors for how to initialize the current value, and these vary +between implementations. One approach is to start with the first value in the series; +another is to average the first 10 or so values in the series using an arithmetic average, +and then begin the incremental updating of the average. Each method has pros and cons. + +It may help to look at it pictorially. 
Suppose the series has five numbers, and we choose +alpha to be 0.50 for simplicity. Here's the series, with numbers in the neighborhood of 300. + +![Data Series](https://user-images.githubusercontent.com/279875/28242350-463289a2-6977-11e7-88ca-fd778ccef1f0.png) + +Now let's take the moving average of those numbers. First we set the average to the value +of the first number. + +![EWMA Step 1](https://user-images.githubusercontent.com/279875/28242353-464c96bc-6977-11e7-9981-dc4e0789c7ba.png) + +Next we multiply the next number by alpha, multiply the current value by 1-alpha, and add +them to generate a new value. + +![EWMA Step 2](https://user-images.githubusercontent.com/279875/28242351-464abefa-6977-11e7-95d0-43900f29bef2.png) + +This continues until we are done. + +![EWMA Step N](https://user-images.githubusercontent.com/279875/28242352-464c58f0-6977-11e7-8cd0-e01e4efaac7f.png) + +Notice how each of the values in the series decays by half each time a new value +is added, and the top of the bars in the lower portion of the image represents the +size of the moving average. It is a smoothed, or low-pass, average of the original +series. + +For further reading, see [Exponentially weighted moving average](http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) on wikipedia. + +### Choosing Alpha + +Consider a fixed-size sliding-window moving average (not an exponentially weighted moving average) +that averages over the previous N samples. What is the average age of each sample? It is N/2. + +Now suppose that you wish to construct an EWMA whose samples have the same average age. The formula +to compute the alpha required for this is: alpha = 2/(N+1). Proof is in the book +"Production and Operations Analysis" by Steven Nahmias. + +So, for example, if you have a time-series with samples once per second, and you want to get the +moving average over the previous minute, you should use an alpha of .032786885. This, by the way, +is the constant alpha used for this repository's SimpleEWMA. + +### Implementations + +This repository contains two implementations of the EWMA algorithm, with different properties. + +The implementations all conform to the MovingAverage interface, and the constructor returns +that type. + +Current implementations assume an implicit time interval of 1.0 between every sample added. +That is, the passage of time is treated as though it's the same as the arrival of samples. +If you need time-based decay when samples are not arriving precisely at set intervals, then +this package will not support your needs at present. + +#### SimpleEWMA + +A SimpleEWMA is designed for low CPU and memory consumption. It **will** have different behavior than the VariableEWMA +for multiple reasons. It has no warm-up period and it uses a constant +decay. These properties let it use less memory. It will also behave +differently when it's equal to zero, which is assumed to mean +uninitialized, so if a value is likely to actually become zero over time, +then any non-zero value will cause a sharp jump instead of a small change. + +#### VariableEWMA + +Unlike SimpleEWMA, this supports a custom age which must be stored, and thus uses more memory. +It also has a "warmup" time when you start adding values to it. It will report a value of 0.0 +until you have added the required number of samples to it. It uses some memory to store the +number of samples added to it. As a result it uses a little over twice the memory of SimpleEWMA.
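Editor's note: to make the four-step pseudocode and the alpha = 2/(N+1) rule above concrete, here is a short self-contained sketch (the sample values are invented for illustration):

```go
package main

import "fmt"

func main() {
	const n = 60
	alpha := 2.0 / (n + 1) // the alpha = 2/(N+1) rule from the README

	samples := []float64{300, 296, 302, 305, 299} // invented data
	avg := samples[0]                             // initialize from the first value
	for _, s := range samples[1:] {
		avg = s*alpha + avg*(1-alpha) // steps 1-3 of the pseudocode
	}
	fmt.Printf("alpha=%.9f ewma=%.3f\n", alpha, avg)
}
```

Running it prints alpha=0.032786885, matching the constant quoted above for a 60-sample (one-minute, once-per-second) window.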
+ +## Usage + +### API Documentation + +View the GoDoc generated documentation [here](http://godoc.org/github.com/VividCortex/ewma). + +```go +package main +import "github.com/VividCortex/ewma" + +func main() { + samples := [100]float64{ + 4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149, + } + + e := ewma.NewMovingAverage() //=> Returns a SimpleEWMA if called without params + a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1) + + for _, f := range samples { + e.Add(f) + a.Add(f) + } + + e.Value() //=> 13.577404704631077 + a.Value() //=> 1.5806140565521463e-12 +} +``` + +## Contributing + +We only accept pull requests for minor fixes or improvements. This includes: + +* Small bug fixes +* Typos +* Documentation or comments + +Please open issues to discuss new features. Pull requests for new features will be rejected, +so we recommend forking the repository and making changes in your fork for your use case. + +## License + +This repository is Copyright (c) 2013 VividCortex, Inc. All rights reserved. +It is licensed under the MIT license. Please see the LICENSE file for applicable license terms. diff --git a/vendor/github.com/VividCortex/ewma/ewma.go b/vendor/github.com/VividCortex/ewma/ewma.go new file mode 100644 index 000000000..44d5d53e3 --- /dev/null +++ b/vendor/github.com/VividCortex/ewma/ewma.go @@ -0,0 +1,126 @@ +// Package ewma implements exponentially weighted moving averages. +package ewma + +// Copyright (c) 2013 VividCortex, Inc. All rights reserved. +// Please see the LICENSE file for applicable license terms. + +const ( + // By default, we average over a one-minute period, which means the average + // age of the metrics in the period is 30 seconds. + AVG_METRIC_AGE float64 = 30.0 + + // The formula for computing the decay factor from the average age comes + // from "Production and Operations Analysis" by Steven Nahmias. + DECAY float64 = 2 / (float64(AVG_METRIC_AGE) + 1) + + // For best results, the moving average should not be initialized to the + // samples it sees immediately. The book "Production and Operations + // Analysis" by Steven Nahmias suggests initializing the moving average to + // the mean of the first 10 samples. Until the VariableEwma has seen this + // many samples, it is not "ready" to be queried for the value of the + // moving average. This adds some memory cost. + WARMUP_SAMPLES uint8 = 10 +) + +// MovingAverage is the interface that computes a moving average over a time- +// series stream of numbers. The average may be over a window or exponentially +// decaying. +type MovingAverage interface { + Add(float64) + Value() float64 + Set(float64) +} + +// NewMovingAverage constructs a MovingAverage that computes an average with the +// desired characteristics in the moving window or exponential decay. If no +// age is given, it constructs a default exponentially weighted implementation +// that consumes minimal memory. The age is related to the decay factor alpha +// by the formula given for the DECAY constant. It signifies the average age +// of the samples as time goes to infinity. +func NewMovingAverage(age ...float64) MovingAverage { + if len(age) == 0 || age[0] == AVG_METRIC_AGE { + return new(SimpleEWMA) + } + return &VariableEWMA{ + decay: 2 / (age[0] + 1), + } +} + +// A SimpleEWMA represents the exponentially weighted moving average of a +// series of numbers. It WILL have different behavior than the VariableEWMA +// for multiple reasons. 
It has no warm-up period and it uses a constant +// decay. These properties let it use less memory. It will also behave +// differently when it's equal to zero, which is assumed to mean +// uninitialized, so if a value is likely to actually become zero over time, +// then any non-zero value will cause a sharp jump instead of a small change. +// However, note that this takes a long time, and the value may just +// decay to a stable value that's close to zero, but which won't be mistaken +// for uninitialized. See http://play.golang.org/p/litxBDr_RC for example. +type SimpleEWMA struct { + // The current value of the average. After adding with Add(), this is + // updated to reflect the average of all values seen thus far. + value float64 +} + +// Add adds a value to the series and updates the moving average. +func (e *SimpleEWMA) Add(value float64) { + if e.value == 0 { // this is a proxy for "uninitialized" + e.value = value + } else { + e.value = (value * DECAY) + (e.value * (1 - DECAY)) + } +} + +// Value returns the current value of the moving average. +func (e *SimpleEWMA) Value() float64 { + return e.value +} + +// Set sets the EWMA's value. +func (e *SimpleEWMA) Set(value float64) { + e.value = value +} + +// VariableEWMA represents the exponentially weighted moving average of a series of +// numbers. Unlike SimpleEWMA, it supports a custom age, and thus uses more memory. +type VariableEWMA struct { + // The multiplier factor by which the previous samples decay. + decay float64 + // The current value of the average. + value float64 + // The number of samples added to this instance. + count uint8 +} + +// Add adds a value to the series and updates the moving average. +func (e *VariableEWMA) Add(value float64) { + switch { + case e.count < WARMUP_SAMPLES: + e.count++ + e.value += value + case e.count == WARMUP_SAMPLES: + e.count++ + e.value = e.value / float64(WARMUP_SAMPLES) + e.value = (value * e.decay) + (e.value * (1 - e.decay)) + default: + e.value = (value * e.decay) + (e.value * (1 - e.decay)) + } +} + +// Value returns the current value of the average, or 0.0 if the series hasn't +// warmed up yet. +func (e *VariableEWMA) Value() float64 { + if e.count <= WARMUP_SAMPLES { + return 0.0 + } + + return e.value +} + +// Set sets the EWMA's value. +func (e *VariableEWMA) Set(value float64) { + e.value = value + if e.count <= WARMUP_SAMPLES { + e.count = WARMUP_SAMPLES + 1 + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 3b809e847..39fa6d5fe 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -438,13 +438,6 @@ func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { return c } -// MergeIn merges the passed in configs into the existing config object. -func (c *Config) MergeIn(cfgs ...*Config) { - for _, other := range cfgs { - mergeInConfig(c, other) - } -} - // WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag // when resolving the endpoint for a service func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config { @@ -459,6 +452,27 @@ func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEn return c } + +// WithLowerCaseHeaderMaps sets a config LowerCaseHeaderMaps value +// returning a Config pointer for chaining.
+func (c *Config) WithLowerCaseHeaderMaps(t bool) *Config { + c.LowerCaseHeaderMaps = &t + return c +} + +// WithDisableRestProtocolURICleaning sets a config DisableRestProtocolURICleaning value +// returning a Config pointer for chaining. +func (c *Config) WithDisableRestProtocolURICleaning(t bool) *Config { + c.DisableRestProtocolURICleaning = &t + return c +} + +// MergeIn merges the passed in configs into the existing config object. +func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + func mergeInConfig(dst *Config, other *Config) { if other == nil { return @@ -571,6 +585,10 @@ func mergeInConfig(dst *Config, other *Config) { if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint { dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint } + + if other.LowerCaseHeaderMaps != nil { + dst.LowerCaseHeaderMaps = other.LowerCaseHeaderMaps + } } // Copy will return a shallow copy of the Config object. If any additional diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go new file mode 100644 index 000000000..18c940ab3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go @@ -0,0 +1,60 @@ +// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token. +// +// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider +// expects that you have already performed the SSO login flow using AWS CLI using the "aws sso login" command, or by +// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in +// ~/.aws/sso/cache. If a cached token is not found, it is expired, or the file is malformed an error will be returned. +// +// Loading AWS SSO credentials with the AWS shared configuration file +// +// You can configure AWS SSO credentials from the AWS shared configuration file by +// specifying the required keys in the profile: +// +// sso_account_id +// sso_region +// sso_role_name +// sso_start_url +// +// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that define the target +// account, role, sign-on portal, and the region where the user portal is located. Note: all SSO arguments must be +// provided, or an error will be returned. +// +// [profile devsso] +// sso_start_url = https://my-sso-portal.awsapps.com/start +// sso_role_name = SSOReadOnlyRole +// sso_region = us-east-1 +// sso_account_id = 123456789012 +// +// Using the config module, you can load the AWS SDK shared configuration, and specify that this profile be used to +// retrieve credentials. For example: +// +// sess, err := session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// Profile: "devsso", +// }) +// if err != nil { +// return err +// } +// +// Programmatically loading AWS SSO credentials directly +// +// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information +// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache.
+// +// svc := sso.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), // Client Region must correspond to the AWS SSO user portal region +// }) +// +// provider := ssocreds.NewCredentialsWithClient(svc, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start") +// +// credentials, err := provider.Get() +// if err != nil { +// return err +// } +// +// Additional Resources +// +// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +// +// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html +package ssocreds diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go new file mode 100644 index 000000000..ceca7dcee --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go @@ -0,0 +1,9 @@ +// +build !windows + +package ssocreds + +import "os" + +func getHomeDirectory() string { + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go new file mode 100644 index 000000000..eb48f61e5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go @@ -0,0 +1,7 @@ +package ssocreds + +import "os" + +func getHomeDirectory() string { + return os.Getenv("USERPROFILE") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go new file mode 100644 index 000000000..6eda2a555 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go @@ -0,0 +1,180 @@ +package ssocreds + +import ( + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sso" + "github.com/aws/aws-sdk-go/service/sso/ssoiface" +) + +// ErrCodeSSOProviderInvalidToken is the code type that is returned if loaded token has expired or is otherwise invalid. +// To refresh the SSO session run aws sso login with the corresponding profile. +const ErrCodeSSOProviderInvalidToken = "SSOProviderInvalidToken" + +const invalidTokenMessage = "the SSO session has expired or is invalid" + +func init() { + nowTime = time.Now + defaultCacheLocation = defaultCacheLocationImpl +} + +var nowTime func() time.Time + +// ProviderName is the name of the provider used to specify the source of credentials. +const ProviderName = "SSOProvider" + +var defaultCacheLocation func() string + +func defaultCacheLocationImpl() string { + return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache") +} + +// Provider is an AWS credential provider that retrieves temporary AWS credentials by exchanging an SSO login token. +type Provider struct { + credentials.Expiry + + // The Client which is configured for the AWS Region where the AWS SSO user portal is located. + Client ssoiface.SSOAPI + + // The AWS account that is assigned to the user. + AccountID string + + // The role name that is assigned to the user. + RoleName string + + // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal. 
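Editor's note: per the ssocreds package documentation above, the provider looks up a cached token file keyed by the start URL under ~/.aws/sso/cache. A rough sketch of that lookup follows (for illustration only; the real provider in the next file derives the key the same way but uses the platform helpers above rather than os.UserHomeDir):

```go
package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"os"
	"path/filepath"
)

// cachedTokenPath derives the cache file name for a start URL:
// the lowercase sha1 hex digest of the URL plus a ".json" suffix.
func cachedTokenPath(startURL string) string {
	sum := sha1.Sum([]byte(startURL))
	name := hex.EncodeToString(sum[:]) + ".json"
	home, _ := os.UserHomeDir() // illustration only; the SDK reads HOME/USERPROFILE
	return filepath.Join(home, ".aws", "sso", "cache", name)
}

func main() {
	fmt.Println(cachedTokenPath("https://my-sso-portal.awsapps.com/start"))
}
```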
+ StartURL string +} + +// NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured +// for the AWS Region where the AWS SSO user portal is located. +func NewCredentials(configProvider client.ConfigProvider, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials { + return NewCredentialsWithClient(sso.New(configProvider), accountID, roleName, startURL, optFns...) +} + +// NewCredentialsWithClient returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured +// for the AWS Region where the AWS SSO user portal is located. +func NewCredentialsWithClient(client ssoiface.SSOAPI, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials { + p := &Provider{ + Client: client, + AccountID: accountID, + RoleName: roleName, + StartURL: startURL, + } + + for _, fn := range optFns { + fn(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal +// by exchanging the accessToken present in ~/.aws/sso/cache. +func (p *Provider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal +// by exchanging the accessToken present in ~/.aws/sso/cache. +func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + tokenFile, err := loadTokenFile(p.StartURL) + if err != nil { + return credentials.Value{}, err + } + + output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{ + AccessToken: &tokenFile.AccessToken, + AccountId: &p.AccountID, + RoleName: &p.RoleName, + }) + if err != nil { + return credentials.Value{}, err + } + + expireTime := time.Unix(0, aws.Int64Value(output.RoleCredentials.Expiration)*int64(time.Millisecond)).UTC() + p.SetExpiration(expireTime, 0) + + return credentials.Value{ + AccessKeyID: aws.StringValue(output.RoleCredentials.AccessKeyId), + SecretAccessKey: aws.StringValue(output.RoleCredentials.SecretAccessKey), + SessionToken: aws.StringValue(output.RoleCredentials.SessionToken), + ProviderName: ProviderName, + }, nil +} + +func getCacheFileName(url string) (string, error) { + hash := sha1.New() + _, err := hash.Write([]byte(url)) + if err != nil { + return "", err + } + return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil +} + +type rfc3339 time.Time + +func (r *rfc3339) UnmarshalJSON(bytes []byte) error { + var value string + + if err := json.Unmarshal(bytes, &value); err != nil { + return err + } + + parse, err := time.Parse(time.RFC3339, value) + if err != nil { + return fmt.Errorf("expected RFC3339 timestamp: %v", err) + } + + *r = rfc3339(parse) + + return nil +} + +type token struct { + AccessToken string `json:"accessToken"` + ExpiresAt rfc3339 `json:"expiresAt"` + Region string `json:"region,omitempty"` + StartURL string `json:"startUrl,omitempty"` +} + +func (t token) Expired() bool { + return nowTime().Round(0).After(time.Time(t.ExpiresAt)) +} + +func loadTokenFile(startURL string) (t token, err error) { + key, err := getCacheFileName(startURL) + if err != nil { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) + } + + fileBytes, err := 
ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key)) + if err != nil { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) + } + + if err := json.Unmarshal(fileBytes, &t); err != nil { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) + } + + if len(t.AccessToken) == 0 { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil) + } + + if t.Expired() { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil) + } + + return t, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go index 6846ef6f8..e42c5cdbb 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -244,9 +244,11 @@ type AssumeRoleProvider struct { MaxJitterFrac float64 } -// NewCredentials returns a pointer to a new Credentials object wrapping the +// NewCredentials returns a pointer to a new Credentials value wrapping the // AssumeRoleProvider. The credentials will expire every 15 minutes and the -// role will be named after a nanosecond timestamp of this operation. +// role will be named after a nanosecond timestamp of this operation. The +// Credentials value will attempt to refresh the credentials using the provider +// when Credentials.Get is called, if the cached credentials are expiring. // // Takes a Config provider to create the STS client. The ConfigProvider is // satisfied by the session.Session type. @@ -268,9 +270,11 @@ func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*As return credentials.NewCredentials(p) } -// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the +// NewCredentialsWithClient returns a pointer to a new Credentials value wrapping the // AssumeRoleProvider. The credentials will expire every 15 minutes and the -// role will be named after a nanosecond timestamp of this operation. +// role will be named after a nanosecond timestamp of this operation. The +// Credentials value will attempt to refresh the credentials using the provider +// when Credentials.Get is called, if the cached credentials are expiring. // // Takes an AssumeRoler which can be satisfied by the STS client. 
// diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 013ccec4a..53098d6f7 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -380,9 +380,33 @@ var awsPartition = partition{ "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "api.detective-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "api.detective-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "api.detective-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "api.detective-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "api.ecr": service{ @@ -746,6 +770,7 @@ var awsPartition = partition{ "appmesh": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1413,6 +1438,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3053,6 +3079,7 @@ var awsPartition = partition{ "fsx": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3062,6 +3089,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3095,11 +3123,12 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "gamelift": service{ @@ -3759,6 +3788,7 @@ var awsPartition = partition{ "lakeformation": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -4274,7 +4304,19 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "models-fips.lex.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "models-fips.lex.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "monitoring": service{ @@ -5188,7 +5230,19 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "runtime-fips.lex.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-east-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "runtime-fips.lex.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "runtime.sagemaker": service{ @@ -8674,7 +8728,12 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, - "us-gov-east-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "greengrass.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{ Hostname: "greengrass.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -9833,6 +9892,18 @@ var awsisoPartition = partition{ "us-iso-east-1": endpoint{}, }, }, + "medialive": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "monitoring": service{ Endpoints: endpoints{ @@ -9909,6 +9980,12 @@ var awsisoPartition = partition{ }, }, }, + "ssm": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "states": service{ Endpoints: endpoints{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go index fe6dac1f4..b0cef7575 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -9,6 +9,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/processcreds" + "github.com/aws/aws-sdk-go/aws/credentials/ssocreds" "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/request" @@ -100,6 +101,9 @@ func resolveCredsFromProfile(cfg *aws.Config, sharedCfg.Creds, ) + case sharedCfg.hasSSOConfiguration(): + creds = resolveSSOCredentials(cfg, sharedCfg, handlers) + case len(sharedCfg.CredentialProcess) != 0: // Get credentials from CredentialProcess creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) @@ -151,6 +155,21 @@ func resolveCredsFromProfile(cfg *aws.Config, return creds, nil } +func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers request.Handlers) *credentials.Credentials { + cfgCopy := cfg.Copy() + cfgCopy.Region = &sharedCfg.SSORegion + + return ssocreds.NewCredentials( + &Session{ + Config: cfgCopy, + Handlers: handlers.Copy(), + }, + sharedCfg.SSOAccountID, + sharedCfg.SSORoleName, + sharedCfg.SSOStartURL, + ) +} + // valid credential source values const ( credSourceEc2Metadata = "Ec2InstanceMetadata" diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 08713cc34..038ae222f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -36,7 +36,7 @@ const ( // ErrSharedConfigSourceCollision will be returned if a section contains both // source_profile and credential_source -var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil) +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil) // ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment // 
variables are empty and Environment was set as the credential source diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index be7daacf3..5ab05d56c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -2,6 +2,7 @@ package session import ( "fmt" + "strings" "time" "github.com/aws/aws-sdk-go/aws/awserr" @@ -25,6 +26,12 @@ const ( roleSessionNameKey = `role_session_name` // optional roleDurationSecondsKey = "duration_seconds" // optional + // AWS Single Sign-On (AWS SSO) group + ssoAccountIDKey = "sso_account_id" + ssoRegionKey = "sso_region" + ssoRoleNameKey = "sso_role_name" + ssoStartURL = "sso_start_url" + // CSM options csmEnabledKey = `csm_enabled` csmHostKey = `csm_host` @@ -78,6 +85,11 @@ type sharedConfig struct { CredentialProcess string WebIdentityTokenFile string + SSOAccountID string + SSORegion string + SSORoleName string + SSOStartURL string + RoleARN string RoleSessionName string ExternalID string @@ -217,9 +229,9 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s cfg.clearAssumeRoleOptions() } else { // First time a profile has been seen, It must either be a assume role - // or credentials. Assert if the credential type requires a role ARN, - // the ARN is also set. - if err := cfg.validateCredentialsRequireARN(profile); err != nil { + // credentials, or SSO. Assert if the credential type requires a role ARN, + // the ARN is also set, or validate that the SSO configuration is complete. + if err := cfg.validateCredentialsConfig(profile); err != nil { return err } } @@ -312,6 +324,12 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e } cfg.S3UsEast1RegionalEndpoint = sre } + + // AWS Single Sign-On (AWS SSO) + updateString(&cfg.SSOAccountID, section, ssoAccountIDKey) + updateString(&cfg.SSORegion, section, ssoRegionKey) + updateString(&cfg.SSORoleName, section, ssoRoleNameKey) + updateString(&cfg.SSOStartURL, section, ssoStartURL) } updateString(&cfg.CredentialProcess, section, credentialProcessKey) @@ -342,6 +360,18 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e return nil } +func (cfg *sharedConfig) validateCredentialsConfig(profile string) error { + if err := cfg.validateCredentialsRequireARN(profile); err != nil { + return err + } + + if err := cfg.validateSSOConfiguration(profile); err != nil { + return err + } + + return nil +} + func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { var credSource string @@ -371,6 +401,7 @@ func (cfg *sharedConfig) validateCredentialType() error { len(cfg.CredentialSource) != 0, len(cfg.CredentialProcess) != 0, len(cfg.WebIdentityTokenFile) != 0, + cfg.hasSSOConfiguration(), ) { return ErrSharedConfigSourceCollision } @@ -378,12 +409,43 @@ func (cfg *sharedConfig) validateCredentialType() error { return nil } +func (cfg *sharedConfig) validateSSOConfiguration(profile string) error { + if !cfg.hasSSOConfiguration() { + return nil + } + + var missing []string + if len(cfg.SSOAccountID) == 0 { + missing = append(missing, ssoAccountIDKey) + } + + if len(cfg.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(cfg.SSORoleName) == 0 { + missing = append(missing, ssoRoleNameKey) + } + + if len(cfg.SSOStartURL) == 0 { + missing = append(missing, ssoStartURL) + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is 
configured to use SSO but is missing required configuration: %s", + profile, strings.Join(missing, ", ")) + } + + return nil +} + func (cfg *sharedConfig) hasCredentials() bool { switch { case len(cfg.SourceProfileName) != 0: case len(cfg.CredentialSource) != 0: case len(cfg.CredentialProcess) != 0: case len(cfg.WebIdentityTokenFile) != 0: + case cfg.hasSSOConfiguration(): case cfg.Creds.HasKeys(): default: return false @@ -407,6 +469,18 @@ func (cfg *sharedConfig) clearAssumeRoleOptions() { cfg.SourceProfileName = "" } +func (cfg *sharedConfig) hasSSOConfiguration() bool { + switch { + case len(cfg.SSOAccountID) != 0: + case len(cfg.SSORegion) != 0: + case len(cfg.SSORoleName) != 0: + case len(cfg.SSOStartURL) != 0: + default: + return false + } + return true +} + func oneOrNone(bs ...bool) bool { var count int diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 28792c952..9514f5189 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.36.25" +const SDKVersion = "1.37.1" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go index d7d42db0a..1f1d27aea 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -1,9 +1,10 @@ package protocol import ( - "strings" - "github.com/aws/aws-sdk-go/aws/request" + "net" + "strconv" + "strings" ) // ValidateEndpointHostHandler is a request handler that will validate the @@ -22,8 +23,26 @@ var ValidateEndpointHostHandler = request.NamedHandler{ // 3986 host. Returns error if the host is not valid. func ValidateEndpointHost(opName, host string) error { paramErrs := request.ErrInvalidParams{Context: opName} - labels := strings.Split(host, ".") + var hostname string + var port string + var err error + + if strings.Contains(host, ":") { + hostname, port, err = net.SplitHostPort(host) + + if err != nil { + paramErrs.Add(request.NewErrParamFormat("endpoint", err.Error(), host)) + } + + if !ValidPortNumber(port) { + paramErrs.Add(request.NewErrParamFormat("endpoint port number", "[0-65535]", port)) + } + } else { + hostname = host + } + + labels := strings.Split(hostname, ".") for i, label := range labels { if i == len(labels)-1 && len(label) == 0 { // Allow trailing dot for FQDN hosts. 
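Editor's note: between these two host.go hunks, the net effect of the change may be easier to see in isolation: an endpoint may now include a port, which is split off before the host labels are validated and checked against the RFC 3986 range. A standalone sketch with invented inputs:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// validPort reports whether port parses as an RFC 3986 port in [0, 65535],
// mirroring the ValidPortNumber helper added in the next hunk.
func validPort(port string) bool {
	i, err := strconv.Atoi(port)
	return err == nil && i >= 0 && i <= 65535
}

func main() {
	for _, ep := range []string{"example.com", "example.com:443", "example.com:99999"} {
		host, port := ep, ""
		if strings.Contains(ep, ":") { // only split when a colon is present, as in the hunk
			var err error
			host, port, err = net.SplitHostPort(ep)
			if err != nil {
				fmt.Println(ep, "=> malformed:", err)
				continue
			}
		}
		fmt.Println(ep, "=> host:", host, "port:", port, "port ok:", port == "" || validPort(port))
	}
}
```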
@@ -36,7 +55,11 @@ func ValidateEndpointHost(opName, host string) error { } } - if len(host) > 255 { + if len(hostname) == 0 { + paramErrs.Add(request.NewErrParamMinLen("endpoint host", 1)) + } + + if len(hostname) > 255 { paramErrs.Add(request.NewErrParamMaxLen( "endpoint host", 255, host, )) @@ -66,3 +89,16 @@ func ValidHostLabel(label string) bool { return true } + +// ValidPortNumber returns whether the port is a valid RFC 3986 port +func ValidPortNumber(port string) bool { + i, err := strconv.Atoi(port) + if err != nil { + return false + } + + if i < 0 || i > 65535 { + return false + } + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go new file mode 100644 index 000000000..a029217e4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go @@ -0,0 +1,88 @@ +// Package jsonrpc provides JSON RPC utilities for serialization of AWS +// requests and responses. +package jsonrpc + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/json.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/json.json unmarshal_test.go + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +var emptyJSON = []byte("{}") + +// BuildHandler is a named request handler for building jsonrpc protocol +// requests +var BuildHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.Build", + Fn: Build, +} + +// UnmarshalHandler is a named request handler for unmarshaling jsonrpc +// protocol requests +var UnmarshalHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.Unmarshal", + Fn: Unmarshal, +} + +// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc +// protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.UnmarshalMeta", + Fn: UnmarshalMeta, +} + +// Build builds a JSON payload for a JSON RPC request. +func Build(req *request.Request) { + var buf []byte + var err error + if req.ParamsFilled() { + buf, err = jsonutil.BuildJSON(req.Params) + if err != nil { + req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err) + return + } + } else { + buf = emptyJSON + } + + if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" { + req.SetBufferBody(buf) + } + + if req.ClientInfo.TargetPrefix != "" { + target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name + req.HTTPRequest.Header.Add("X-Amz-Target", target) + } + + // Only set the content type if one is not already specified and a + // JSONVersion is specified. + if ct, v := req.HTTPRequest.Header.Get("Content-Type"), req.ClientInfo.JSONVersion; len(ct) == 0 && len(v) != 0 { + jsonVersion := req.ClientInfo.JSONVersion + req.HTTPRequest.Header.Set("Content-Type", "application/x-amz-json-"+jsonVersion) + } +} + +// Unmarshal unmarshals a response for a JSON RPC service.
+func Unmarshal(req *request.Request) { + defer req.HTTPResponse.Body.Close() + if req.DataFilled() { + err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err), + req.HTTPResponse.StatusCode, + req.RequestID, + ) + } + } + return +} + +// UnmarshalMeta unmarshals headers from a response for a JSON RPC service. +func UnmarshalMeta(req *request.Request) { + rest.UnmarshalMeta(req) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go new file mode 100644 index 000000000..c0c52e2db --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go @@ -0,0 +1,107 @@ +package jsonrpc + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" +) + +// UnmarshalTypedError provides unmarshaling of API response errors +// for both typed and untyped errors. +type UnmarshalTypedError struct { + exceptions map[string]func(protocol.ResponseMetadata) error +} + +// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the +// set of exception names to the error unmarshalers +func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError { + return &UnmarshalTypedError{ + exceptions: exceptions, + } +} + +// UnmarshalError attempts to unmarshal the HTTP response error as a known +// error type. If unable to unmarshal the error type, the generic SDK error +// type will be used. +func (u *UnmarshalTypedError) UnmarshalError( + resp *http.Response, + respMeta protocol.ResponseMetadata, +) (error, error) { + + var buf bytes.Buffer + var jsonErr jsonErrorResponse + teeReader := io.TeeReader(resp.Body, &buf) + err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader) + if err != nil { + return nil, err + } + body := ioutil.NopCloser(&buf) + + // Code may be separated by hash(#), with the last element being the code + // used by the SDK. + codeParts := strings.SplitN(jsonErr.Code, "#", 2) + code := codeParts[len(codeParts)-1] + msg := jsonErr.Message + + if fn, ok := u.exceptions[code]; ok { + // If the exception code is known, use the associated constructor to get a value + // for the exception that the JSON body can be unmarshaled into. + v := fn(respMeta) + err := jsonutil.UnmarshalJSONCaseInsensitive(v, body) + if err != nil { + return nil, err + } + + return v, nil + } + + // fallback to unmodeled generic exceptions + return awserr.NewRequestFailure( + awserr.New(code, msg, nil), + respMeta.StatusCode, + respMeta.RequestID, + ), nil +} + +// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc +// protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.UnmarshalError", + Fn: UnmarshalError, +} + +// UnmarshalError unmarshals an error response for a JSON RPC service.
+func UnmarshalError(req *request.Request) { + defer req.HTTPResponse.Body.Close() + + var jsonErr jsonErrorResponse + err := jsonutil.UnmarshalJSONError(&jsonErr, req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), + req.HTTPResponse.StatusCode, + req.RequestID, + ) + return + } + + codes := strings.SplitN(jsonErr.Code, "#", 2) + req.Error = awserr.NewRequestFailure( + awserr.New(codes[len(codes)-1], jsonErr.Message, nil), + req.HTTPResponse.StatusCode, + req.RequestID, + ) +} + +type jsonErrorResponse struct { + Code string `json:"__type"` + Message string `json:"message"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go new file mode 100644 index 000000000..2e0e205af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go @@ -0,0 +1,59 @@ +// Package restjson provides RESTful JSON serialization of AWS +// requests and responses. +package restjson + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-json.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +// BuildHandler is a named request handler for building restjson protocol +// requests +var BuildHandler = request.NamedHandler{ + Name: "awssdk.restjson.Build", + Fn: Build, +} + +// UnmarshalHandler is a named request handler for unmarshaling restjson +// protocol requests +var UnmarshalHandler = request.NamedHandler{ + Name: "awssdk.restjson.Unmarshal", + Fn: Unmarshal, +} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restjson +// protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{ + Name: "awssdk.restjson.UnmarshalMeta", + Fn: UnmarshalMeta, +} + +// Build builds a request for the REST JSON protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + if v := r.HTTPRequest.Header.Get("Content-Type"); len(v) == 0 { + r.HTTPRequest.Header.Set("Content-Type", "application/json") + } + jsonrpc.Build(r) + } +} + +// Unmarshal unmarshals a response body for the REST JSON protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + jsonrpc.Unmarshal(r) + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST JSON protocol. 
+func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go new file mode 100644 index 000000000..d756d8cc5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go @@ -0,0 +1,134 @@ +package restjson + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + errorTypeHeader = "X-Amzn-Errortype" + errorMessageHeader = "X-Amzn-Errormessage" +) + +// UnmarshalTypedError provides unmarshaling of API response errors +// for both typed and untyped errors. +type UnmarshalTypedError struct { + exceptions map[string]func(protocol.ResponseMetadata) error +} + +// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the +// set of exception names to the error unmarshalers +func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError { + return &UnmarshalTypedError{ + exceptions: exceptions, + } +} + +// UnmarshalError attempts to unmarshal the HTTP response error as a known +// error type. If unable to unmarshal the error type, the generic SDK error +// type will be used. +func (u *UnmarshalTypedError) UnmarshalError( + resp *http.Response, + respMeta protocol.ResponseMetadata, +) (error, error) { + + code := resp.Header.Get(errorTypeHeader) + msg := resp.Header.Get(errorMessageHeader) + + body := resp.Body + if len(code) == 0 { + // If unable to get code from HTTP headers have to parse JSON message + // to determine what kind of exception this will be. + var buf bytes.Buffer + var jsonErr jsonErrorResponse + teeReader := io.TeeReader(resp.Body, &buf) + err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader) + if err != nil { + return nil, err + } + + body = ioutil.NopCloser(&buf) + code = jsonErr.Code + msg = jsonErr.Message + } + + // If code has colon separators remove them so can compare against modeled + // exception names. + code = strings.SplitN(code, ":", 2)[0] + + if fn, ok := u.exceptions[code]; ok { + // If the exception code is known, use the associated constructor to get a value + // for the exception that the JSON body can be unmarshaled into. + v := fn(respMeta) + if err := jsonutil.UnmarshalJSONCaseInsensitive(v, body); err != nil { + return nil, err + } + + if err := rest.UnmarshalResponse(resp, v, true); err != nil { + return nil, err + } + + return v, nil + } + + // fallback to unmodeled generic exceptions + return awserr.NewRequestFailure( + awserr.New(code, msg, nil), + respMeta.StatusCode, + respMeta.RequestID, + ), nil +} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restjson +// protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{ + Name: "awssdk.restjson.UnmarshalError", + Fn: UnmarshalError, +} + +// UnmarshalError unmarshals a response error for the REST JSON protocol.
+func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var jsonErr jsonErrorResponse + err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal response error", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + code := r.HTTPResponse.Header.Get(errorTypeHeader) + if code == "" { + code = jsonErr.Code + } + msg := r.HTTPResponse.Header.Get(errorMessageHeader) + if msg == "" { + msg = jsonErr.Message + } + + code = strings.SplitN(code, ":", 2)[0] + r.Error = awserr.NewRequestFailure( + awserr.New(code, msg, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) +} + +type jsonErrorResponse struct { + Code string `json:"code"` + Message string `json:"message"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go index 8770d4041..7dba8347b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go @@ -109,6 +109,9 @@ type UploadOutput struct { // The ID for a multipart upload to S3. In the case of an error the error // can be cast to the MultiUploadFailure interface to extract the upload ID. UploadID string + + // Entity tag of the object. + ETag *string } // WithUploaderRequestOptions appends to the Uploader's API request options. @@ -527,6 +530,7 @@ func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, e return &UploadOutput{ Location: url, VersionID: out.VersionId, + ETag: out.ETag, }, nil } @@ -632,6 +636,7 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadO Location: uploadLocation, VersionID: complete.VersionId, UploadID: u.uploadID, + ETag: complete.ETag, }, nil } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go new file mode 100644 index 000000000..4498f285e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go @@ -0,0 +1,1210 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sso + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opGetRoleCredentials = "GetRoleCredentials" + +// GetRoleCredentialsRequest generates a "aws/request.Request" representing the +// client's request for the GetRoleCredentials operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetRoleCredentials for more information on using the GetRoleCredentials +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetRoleCredentialsRequest method.
+// req, resp := client.GetRoleCredentialsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials +func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *request.Request, output *GetRoleCredentialsOutput) { + op := &request.Operation{ + Name: opGetRoleCredentials, + HTTPMethod: "GET", + HTTPPath: "/federation/credentials", + } + + if input == nil { + input = &GetRoleCredentialsInput{} + } + + output = &GetRoleCredentialsOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// GetRoleCredentials API operation for AWS Single Sign-On. +// +// Returns the STS short-term credentials for a given role name that is assigned +// to the user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation GetRoleCredentials for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials +func (c *SSO) GetRoleCredentials(input *GetRoleCredentialsInput) (*GetRoleCredentialsOutput, error) { + req, out := c.GetRoleCredentialsRequest(input) + return out, req.Send() +} + +// GetRoleCredentialsWithContext is the same as GetRoleCredentials with the addition of +// the ability to pass a context and additional request options. +// +// See GetRoleCredentials for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) GetRoleCredentialsWithContext(ctx aws.Context, input *GetRoleCredentialsInput, opts ...request.Option) (*GetRoleCredentialsOutput, error) { + req, out := c.GetRoleCredentialsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListAccountRoles = "ListAccountRoles" + +// ListAccountRolesRequest generates a "aws/request.Request" representing the +// client's request for the ListAccountRoles operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAccountRoles for more information on using the ListAccountRoles +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAccountRolesRequest method. +// req, resp := client.ListAccountRolesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles +func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *request.Request, output *ListAccountRolesOutput) { + op := &request.Operation{ + Name: opListAccountRoles, + HTTPMethod: "GET", + HTTPPath: "/assignment/roles", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAccountRolesInput{} + } + + output = &ListAccountRolesOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// ListAccountRoles API operation for AWS Single Sign-On. +// +// Lists all roles that are assigned to the user for a given AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation ListAccountRoles for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles +func (c *SSO) ListAccountRoles(input *ListAccountRolesInput) (*ListAccountRolesOutput, error) { + req, out := c.ListAccountRolesRequest(input) + return out, req.Send() +} + +// ListAccountRolesWithContext is the same as ListAccountRoles with the addition of +// the ability to pass a context and additional request options. +// +// See ListAccountRoles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountRolesWithContext(ctx aws.Context, input *ListAccountRolesInput, opts ...request.Option) (*ListAccountRolesOutput, error) { + req, out := c.ListAccountRolesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAccountRolesPages iterates over the pages of a ListAccountRoles operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAccountRoles method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccountRoles operation. 
+// pageNum := 0 +// err := client.ListAccountRolesPages(params, +// func(page *sso.ListAccountRolesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSO) ListAccountRolesPages(input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool) error { + return c.ListAccountRolesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAccountRolesPagesWithContext same as ListAccountRolesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountRolesPagesWithContext(ctx aws.Context, input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAccountRolesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAccountRolesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAccountRolesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListAccounts = "ListAccounts" + +// ListAccountsRequest generates a "aws/request.Request" representing the +// client's request for the ListAccounts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAccounts for more information on using the ListAccounts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAccountsRequest method. +// req, resp := client.ListAccountsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts +func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Request, output *ListAccountsOutput) { + op := &request.Operation{ + Name: opListAccounts, + HTTPMethod: "GET", + HTTPPath: "/assignment/accounts", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAccountsInput{} + } + + output = &ListAccountsOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// ListAccounts API operation for AWS Single Sign-On. +// +// Lists all AWS accounts assigned to the user. These AWS accounts are assigned +// by the administrator of the account. For more information, see Assign User +// Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) +// in the AWS SSO User Guide. This operation returns a paginated response. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation ListAccounts for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts +func (c *SSO) ListAccounts(input *ListAccountsInput) (*ListAccountsOutput, error) { + req, out := c.ListAccountsRequest(input) + return out, req.Send() +} + +// ListAccountsWithContext is the same as ListAccounts with the addition of +// the ability to pass a context and additional request options. +// +// See ListAccounts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountsWithContext(ctx aws.Context, input *ListAccountsInput, opts ...request.Option) (*ListAccountsOutput, error) { + req, out := c.ListAccountsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAccountsPages iterates over the pages of a ListAccounts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAccounts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccounts operation. +// pageNum := 0 +// err := client.ListAccountsPages(params, +// func(page *sso.ListAccountsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSO) ListAccountsPages(input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool) error { + return c.ListAccountsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAccountsPagesWithContext same as ListAccountsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountsPagesWithContext(ctx aws.Context, input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAccountsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAccountsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAccountsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opLogout = "Logout" + +// LogoutRequest generates a "aws/request.Request" representing the +// client's request for the Logout operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See Logout for more information on using the Logout +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the LogoutRequest method. +// req, resp := client.LogoutRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout +func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *LogoutOutput) { + op := &request.Operation{ + Name: opLogout, + HTTPMethod: "POST", + HTTPPath: "/logout", + } + + if input == nil { + input = &LogoutInput{} + } + + output = &LogoutOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// Logout API operation for AWS Single Sign-On. +// +// Removes the client- and server-side session that is associated with the user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation Logout for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout +func (c *SSO) Logout(input *LogoutInput) (*LogoutOutput, error) { + req, out := c.LogoutRequest(input) + return out, req.Send() +} + +// LogoutWithContext is the same as Logout with the addition of +// the ability to pass a context and additional request options. +// +// See Logout for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) LogoutWithContext(ctx aws.Context, input *LogoutInput, opts ...request.Option) (*LogoutOutput, error) { + req, out := c.LogoutRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Provides information about your AWS account. 
+type AccountInfo struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS account that is assigned to the user. + AccountId *string `locationName:"accountId" type:"string"` + + // The display name of the AWS account that is assigned to the user. + AccountName *string `locationName:"accountName" type:"string"` + + // The email address of the AWS account that is assigned to the user. + EmailAddress *string `locationName:"emailAddress" min:"1" type:"string"` +} + +// String returns the string representation +func (s AccountInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountInfo) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *AccountInfo) SetAccountId(v string) *AccountInfo { + s.AccountId = &v + return s +} + +// SetAccountName sets the AccountName field's value. +func (s *AccountInfo) SetAccountName(v string) *AccountInfo { + s.AccountName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *AccountInfo) SetEmailAddress(v string) *AccountInfo { + s.EmailAddress = &v + return s +} + +type GetRoleCredentialsInput struct { + _ struct{} `type:"structure"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // The identifier for the AWS account that is assigned to the user. + // + // AccountId is a required field + AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` + + // The friendly name of the role that is assigned to the user. + // + // RoleName is a required field + RoleName *string `location:"querystring" locationName:"role_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRoleCredentialsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRoleCredentialsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRoleCredentialsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRoleCredentialsInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *GetRoleCredentialsInput) SetAccessToken(v string) *GetRoleCredentialsInput { + s.AccessToken = &v + return s +} + +// SetAccountId sets the AccountId field's value. +func (s *GetRoleCredentialsInput) SetAccountId(v string) *GetRoleCredentialsInput { + s.AccountId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *GetRoleCredentialsInput) SetRoleName(v string) *GetRoleCredentialsInput { + s.RoleName = &v + return s +} + +type GetRoleCredentialsOutput struct { + _ struct{} `type:"structure"` + + // The credentials for the role that is assigned to the user. 
+ RoleCredentials *RoleCredentials `locationName:"roleCredentials" type:"structure"` +} + +// String returns the string representation +func (s GetRoleCredentialsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRoleCredentialsOutput) GoString() string { + return s.String() +} + +// SetRoleCredentials sets the RoleCredentials field's value. +func (s *GetRoleCredentialsOutput) SetRoleCredentials(v *RoleCredentials) *GetRoleCredentialsOutput { + s.RoleCredentials = v + return s +} + +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +type InvalidRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidRequestException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidRequestException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { + return &InvalidRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRequestException) Code() string { + return "InvalidRequestException" +} + +// Message returns the exception's message. +func (s *InvalidRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRequestException) OrigErr() error { + return nil +} + +func (s *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListAccountRolesInput struct { + _ struct{} `type:"structure"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // The identifier for the AWS account that is assigned to the user. + // + // AccountId is a required field + AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` + + // The number of items that clients can request per page. + MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"` + + // The page token from the previous response output when you request subsequent + // pages. + NextToken *string `location:"querystring" locationName:"next_token" type:"string"` +} + +// String returns the string representation +func (s ListAccountRolesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountRolesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListAccountRolesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAccountRolesInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *ListAccountRolesInput) SetAccessToken(v string) *ListAccountRolesInput { + s.AccessToken = &v + return s +} + +// SetAccountId sets the AccountId field's value. +func (s *ListAccountRolesInput) SetAccountId(v string) *ListAccountRolesInput { + s.AccountId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAccountRolesInput) SetMaxResults(v int64) *ListAccountRolesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountRolesInput) SetNextToken(v string) *ListAccountRolesInput { + s.NextToken = &v + return s +} + +type ListAccountRolesOutput struct { + _ struct{} `type:"structure"` + + // The page token client that is used to retrieve the list of accounts. + NextToken *string `locationName:"nextToken" type:"string"` + + // A paginated response with the list of roles and the next token if more results + // are available. + RoleList []*RoleInfo `locationName:"roleList" type:"list"` +} + +// String returns the string representation +func (s ListAccountRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountRolesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountRolesOutput) SetNextToken(v string) *ListAccountRolesOutput { + s.NextToken = &v + return s +} + +// SetRoleList sets the RoleList field's value. +func (s *ListAccountRolesOutput) SetRoleList(v []*RoleInfo) *ListAccountRolesOutput { + s.RoleList = v + return s +} + +type ListAccountsInput struct { + _ struct{} `type:"structure"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // This is the number of items clients can request per page. + MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"` + + // (Optional) When requesting subsequent pages, this is the page token from + // the previous response output. + NextToken *string `location:"querystring" locationName:"next_token" type:"string"` +} + +// String returns the string representation +func (s ListAccountsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListAccountsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAccountsInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *ListAccountsInput) SetAccessToken(v string) *ListAccountsInput { + s.AccessToken = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAccountsInput) SetMaxResults(v int64) *ListAccountsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountsInput) SetNextToken(v string) *ListAccountsInput { + s.NextToken = &v + return s +} + +type ListAccountsOutput struct { + _ struct{} `type:"structure"` + + // A paginated response with the list of account information and the next token + // if more results are available. + AccountList []*AccountInfo `locationName:"accountList" type:"list"` + + // The page token client that is used to retrieve the list of accounts. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListAccountsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountsOutput) GoString() string { + return s.String() +} + +// SetAccountList sets the AccountList field's value. +func (s *ListAccountsOutput) SetAccountList(v []*AccountInfo) *ListAccountsOutput { + s.AccountList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountsOutput) SetNextToken(v string) *ListAccountsOutput { + s.NextToken = &v + return s +} + +type LogoutInput struct { + _ struct{} `type:"structure"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s LogoutInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogoutInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LogoutInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LogoutInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *LogoutInput) SetAccessToken(v string) *LogoutInput { + s.AccessToken = &v + return s +} + +type LogoutOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s LogoutOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogoutOutput) GoString() string { + return s.String() +} + +// The specified resource doesn't exist. 
+type ResourceNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ResourceNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceNotFoundException) GoString() string { + return s.String() +} + +func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { + return &ResourceNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceNotFoundException) Code() string { + return "ResourceNotFoundException" +} + +// Message returns the exception's message. +func (s *ResourceNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceNotFoundException) OrigErr() error { + return nil +} + +func (s *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Provides information about the role credentials that are assigned to the +// user. +type RoleCredentials struct { + _ struct{} `type:"structure"` + + // The identifier used for the temporary security credentials. For more information, + // see Using Temporary Security Credentials to Request Access to AWS Resources + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + AccessKeyId *string `locationName:"accessKeyId" type:"string"` + + // The date on which temporary security credentials expire. + Expiration *int64 `locationName:"expiration" type:"long"` + + // The key that is used to sign the request. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"` + + // The token used for temporary credentials. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + SessionToken *string `locationName:"sessionToken" type:"string" sensitive:"true"` +} + +// String returns the string representation +func (s RoleCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoleCredentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *RoleCredentials) SetAccessKeyId(v string) *RoleCredentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *RoleCredentials) SetExpiration(v int64) *RoleCredentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. 
+func (s *RoleCredentials) SetSecretAccessKey(v string) *RoleCredentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *RoleCredentials) SetSessionToken(v string) *RoleCredentials { + s.SessionToken = &v + return s +} + +// Provides information about the role that is assigned to the user. +type RoleInfo struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS account assigned to the user. + AccountId *string `locationName:"accountId" type:"string"` + + // The friendly name of the role that is assigned to the user. + RoleName *string `locationName:"roleName" type:"string"` +} + +// String returns the string representation +func (s RoleInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoleInfo) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *RoleInfo) SetAccountId(v string) *RoleInfo { + s.AccountId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *RoleInfo) SetRoleName(v string) *RoleInfo { + s.RoleName = &v + return s +} + +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +type TooManyRequestsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s TooManyRequestsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TooManyRequestsException) GoString() string { + return s.String() +} + +func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { + return &TooManyRequestsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TooManyRequestsException) Code() string { + return "TooManyRequestsException" +} + +// Message returns the exception's message. +func (s *TooManyRequestsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TooManyRequestsException) OrigErr() error { + return nil +} + +func (s *TooManyRequestsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +type UnauthorizedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s UnauthorizedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnauthorizedException) GoString() string { + return s.String() +} + +func newErrorUnauthorizedException(v protocol.ResponseMetadata) error { + return &UnauthorizedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *UnauthorizedException) Code() string {
+	return "UnauthorizedException"
+}
+
+// Message returns the exception's message.
+func (s *UnauthorizedException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *UnauthorizedException) OrigErr() error {
+	return nil
+}
+
+func (s *UnauthorizedException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *UnauthorizedException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *UnauthorizedException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
new file mode 100644
index 000000000..92d82b2af
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
@@ -0,0 +1,60 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package sso provides the client and types for making API
+// requests to AWS Single Sign-On.
+//
+// AWS Single Sign-On Portal is a web service that makes it easy for you to
+// assign user access to AWS SSO resources such as the user portal. Users can
+// get AWS account applications and roles assigned to them and get federated
+// into the application.
+//
+// For general information about AWS SSO, see What is AWS Single Sign-On? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
+// in the AWS SSO User Guide.
+//
+// This API reference guide describes the AWS SSO Portal operations that you
+// can call programmatically and includes detailed information on data types
+// and errors.
+//
+// AWS provides SDKs that consist of libraries and sample code for various programming
+// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs
+// provide a convenient way to create programmatic access to AWS SSO and other
+// AWS services. For more information about the AWS SDKs, including how to download
+// and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service.
+//
+// See sso package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/
+//
+// Using the Client
+//
+// To contact AWS Single Sign-On with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Single Sign-On client SSO for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/#New
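+//
+// For illustration only (not part of the generated documentation), a typical
+// call might look like the following sketch; the region, account, role and
+// token values are placeholders:
+//
+//	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
+//	svc := sso.New(sess)
+//	out, err := svc.GetRoleCredentials(&sso.GetRoleCredentialsInput{
+//		AccessToken: aws.String("<token from the OIDC CreateToken call>"),
+//		AccountId:   aws.String("123456789012"),
+//		RoleName:    aws.String("MyRole"),
+//	})
+//	if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == sso.ErrCodeUnauthorizedException {
+//		// handle an invalid or expired access token
+//	}
+//	_ = out
+package sso
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
new file mode 100644
index 000000000..77a6792e3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
@@ -0,0 +1,44 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.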
+
+package sso
+
+import (
+	"github.com/aws/aws-sdk-go/private/protocol"
+)
+
+const (
+
+	// ErrCodeInvalidRequestException for service response error code
+	// "InvalidRequestException".
+	//
+	// Indicates that a problem occurred with the input to the request. For example,
+	// a required parameter might be missing or out of range.
+	ErrCodeInvalidRequestException = "InvalidRequestException"
+
+	// ErrCodeResourceNotFoundException for service response error code
+	// "ResourceNotFoundException".
+	//
+	// The specified resource doesn't exist.
+	ErrCodeResourceNotFoundException = "ResourceNotFoundException"
+
+	// ErrCodeTooManyRequestsException for service response error code
+	// "TooManyRequestsException".
+	//
+	// Indicates that the request is being made too frequently and is more than
+	// what the server can handle.
+	ErrCodeTooManyRequestsException = "TooManyRequestsException"
+
+	// ErrCodeUnauthorizedException for service response error code
+	// "UnauthorizedException".
+	//
+	// Indicates that the request is not authorized. This can happen due to an invalid
+	// access token in the request.
+	ErrCodeUnauthorizedException = "UnauthorizedException"
+)
+
+var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
+	"InvalidRequestException":   newErrorInvalidRequestException,
+	"ResourceNotFoundException": newErrorResourceNotFoundException,
+	"TooManyRequestsException":  newErrorTooManyRequestsException,
+	"UnauthorizedException":     newErrorUnauthorizedException,
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
new file mode 100644
index 000000000..35175331f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
@@ -0,0 +1,104 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sso
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// SSO provides the API operation methods for making requests to
+// AWS Single Sign-On. See this package's package overview docs
+// for details on the service.
+//
+// SSO methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type SSO struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "SSO"        // Name of service.
+	EndpointsID = "portal.sso" // ID to lookup a service endpoint with.
+	ServiceID   = "SSO"        // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the SSO client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     mySession := session.Must(session.NewSession())
+//
+//     // Create a SSO client from just a session.
+//     svc := sso.New(mySession)
+//
+//     // Create a SSO client with additional configuration
+//     svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+ if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "awsssoportal" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SSO { + svc := &SSO{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2019-06-10", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SSO operation and runs any +// custom request initialization. +func (c *SSO) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go new file mode 100644 index 000000000..4cac247c1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go @@ -0,0 +1,86 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package ssoiface provides an interface to enable mocking the AWS Single Sign-On service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package ssoiface + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/sso" +) + +// SSOAPI provides an interface to enable mocking the +// sso.SSO service client's API operation, +// paginators, and waiters. This make unit testing your code that calls out +// to the SDK's service client's calls easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // AWS Single Sign-On. +// func myFunc(svc ssoiface.SSOAPI) bool { +// // Make svc.GetRoleCredentials request +// } +// +// func main() { +// sess := session.New() +// svc := sso.New(sess) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc. 
+// type mockSSOClient struct { +// ssoiface.SSOAPI +// } +// func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockSSOClient{} +// +// myfunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. Its suggested to use the pattern above for testing, or using +// tooling to generate mocks to satisfy the interfaces. +type SSOAPI interface { + GetRoleCredentials(*sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) + GetRoleCredentialsWithContext(aws.Context, *sso.GetRoleCredentialsInput, ...request.Option) (*sso.GetRoleCredentialsOutput, error) + GetRoleCredentialsRequest(*sso.GetRoleCredentialsInput) (*request.Request, *sso.GetRoleCredentialsOutput) + + ListAccountRoles(*sso.ListAccountRolesInput) (*sso.ListAccountRolesOutput, error) + ListAccountRolesWithContext(aws.Context, *sso.ListAccountRolesInput, ...request.Option) (*sso.ListAccountRolesOutput, error) + ListAccountRolesRequest(*sso.ListAccountRolesInput) (*request.Request, *sso.ListAccountRolesOutput) + + ListAccountRolesPages(*sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool) error + ListAccountRolesPagesWithContext(aws.Context, *sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool, ...request.Option) error + + ListAccounts(*sso.ListAccountsInput) (*sso.ListAccountsOutput, error) + ListAccountsWithContext(aws.Context, *sso.ListAccountsInput, ...request.Option) (*sso.ListAccountsOutput, error) + ListAccountsRequest(*sso.ListAccountsInput) (*request.Request, *sso.ListAccountsOutput) + + ListAccountsPages(*sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool) error + ListAccountsPagesWithContext(aws.Context, *sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool, ...request.Option) error + + Logout(*sso.LogoutInput) (*sso.LogoutOutput, error) + LogoutWithContext(aws.Context, *sso.LogoutInput, ...request.Option) (*sso.LogoutOutput, error) + LogoutRequest(*sso.LogoutInput) (*request.Request, *sso.LogoutOutput) +} + +var _ SSOAPI = (*sso.SSO)(nil) diff --git a/vendor/github.com/cheggaaa/pb/v3/LICENSE b/vendor/github.com/cheggaaa/pb/v3/LICENSE new file mode 100644 index 000000000..511970333 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/LICENSE @@ -0,0 +1,12 @@ +Copyright (c) 2012-2015, Sergey Cherepanov +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/github.com/cheggaaa/pb/v3/element.go b/vendor/github.com/cheggaaa/pb/v3/element.go
new file mode 100644
index 000000000..965183fe7
--- /dev/null
+++ b/vendor/github.com/cheggaaa/pb/v3/element.go
@@ -0,0 +1,306 @@
+package pb
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"sync"
+	"time"
+)
+
+const (
+	adElPlaceholder    = "%_ad_el_%"
+	adElPlaceholderLen = len(adElPlaceholder)
+)
+
+var (
+	defaultBarEls = [5]string{"[", "-", ">", "_", "]"}
+)
+
+// Element is an interface for bar elements
+type Element interface {
+	ProgressElement(state *State, args ...string) string
+}
+
+// ElementFunc type implements the Element interface and exists to simplify defining elements
+type ElementFunc func(state *State, args ...string) string
+
+// ProgressElement just calls the wrapped func
+func (e ElementFunc) ProgressElement(state *State, args ...string) string {
+	return e(state, args...)
+}
+
+var elementsM sync.Mutex
+
+var elements = map[string]Element{
+	"percent":  ElementPercent,
+	"counters": ElementCounters,
+	"bar":      adaptiveWrap(ElementBar),
+	"speed":    ElementSpeed,
+	"rtime":    ElementRemainingTime,
+	"etime":    ElementElapsedTime,
+	"string":   ElementString,
+	"cycle":    ElementCycle,
+}
+
+// RegisterElement gives you a chance to use custom elements
+func RegisterElement(name string, el Element, adaptive bool) {
+	if adaptive {
+		el = adaptiveWrap(el)
+	}
+	elementsM.Lock()
+	elements[name] = el
+	elementsM.Unlock()
+}
+
+type argsHelper []string
+
+func (args argsHelper) getOr(n int, value string) string {
+	if len(args) > n {
+		return args[n]
+	}
+	return value
+}
+
+func (args argsHelper) getNotEmptyOr(n int, value string) (v string) {
+	if v = args.getOr(n, value); v == "" {
+		return value
+	}
+	return
+}
+
+func adaptiveWrap(el Element) Element {
+	return ElementFunc(func(state *State, args ...string) string {
+		state.recalc = append(state.recalc, ElementFunc(func(s *State, _ ...string) (result string) {
+			s.adaptive = true
+			result = el.ProgressElement(s, args...)
+			s.adaptive = false
+			return
+		}))
+		return adElPlaceholder
+	})
+}
+
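+// For illustration only (not part of the upstream file), a hypothetical custom
+// element could be registered like this and then referenced from a template as
+// {{stars .}}:
+//
+//	RegisterElement("stars", ElementFunc(func(state *State, args ...string) string {
+//		if state.IsFinished() {
+//			return "*****"
+//		}
+//		return "....."
+//	}), false)
+
+// ElementPercent shows current percent of progress.
+// Optionally can take one or two string arguments.
+// First string will be used as value for format float64, default is "%.02f%%".
+// Second string will be used when percent can't be calculated, default is "?%"
+// In template use as follows: {{percent .}} or {{percent . "%.03f%%"}} or {{percent . "%.03f%%" "?"}}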
"%.03f%%" "?"}} +var ElementPercent ElementFunc = func(state *State, args ...string) string { + argsh := argsHelper(args) + if state.Total() > 0 { + return fmt.Sprintf( + argsh.getNotEmptyOr(0, "%.02f%%"), + float64(state.Value())/(float64(state.Total())/float64(100)), + ) + } + return argsh.getOr(1, "?%") +} + +// ElementCounters shows current and total values. +// Optionally can take one or two string arguments. +// First string will be used as format value when Total is present (>0). Default is "%s / %s" +// Second string will be used when total <= 0. Default is "%[1]s" +// In template use as follows: {{counters .}} or {{counters . "%s/%s"}} or {{counters . "%s/%s" "%s/?"}} +var ElementCounters ElementFunc = func(state *State, args ...string) string { + var f string + if state.Total() > 0 { + f = argsHelper(args).getNotEmptyOr(0, "%s / %s") + } else { + f = argsHelper(args).getNotEmptyOr(1, "%[1]s") + } + return fmt.Sprintf(f, state.Format(state.Value()), state.Format(state.Total())) +} + +type elementKey int + +const ( + barObj elementKey = iota + speedObj + cycleObj +) + +type bar struct { + eb [5][]byte // elements in bytes + cc [5]int // cell counts + buf *bytes.Buffer +} + +func (p *bar) write(state *State, eln, width int) int { + repeat := width / p.cc[eln] + for i := 0; i < repeat; i++ { + p.buf.Write(p.eb[eln]) + } + StripStringToBuffer(string(p.eb[eln]), width%p.cc[eln], p.buf) + return width +} + +func getProgressObj(state *State, args ...string) (p *bar) { + var ok bool + if p, ok = state.Get(barObj).(*bar); !ok { + p = &bar{ + buf: bytes.NewBuffer(nil), + } + state.Set(barObj, p) + } + argsH := argsHelper(args) + for i := range p.eb { + arg := argsH.getNotEmptyOr(i, defaultBarEls[i]) + if string(p.eb[i]) != arg { + p.cc[i] = CellCount(arg) + p.eb[i] = []byte(arg) + if p.cc[i] == 0 { + p.cc[i] = 1 + p.eb[i] = []byte(" ") + } + } + } + return +} + +// ElementBar make progress bar view [-->__] +// Optionally can take up to 5 string arguments. Defaults is "[", "-", ">", "_", "]" +// In template use as follows: {{bar . }} or {{bar . "<" "oOo" "|" "~" ">"}} +// Color args: {{bar . (red "[") (green "-") ... +var ElementBar ElementFunc = func(state *State, args ...string) string { + // init + var p = getProgressObj(state, args...) 
+
+	total, value := state.Total(), state.Value()
+	if total < 0 {
+		total = -total
+	}
+	if value < 0 {
+		value = -value
+	}
+
+	// check for overflow
+	if total != 0 && value > total {
+		total = value
+	}
+
+	p.buf.Reset()
+
+	var widthLeft = state.AdaptiveElWidth()
+	if widthLeft <= 0 || !state.IsAdaptiveWidth() {
+		widthLeft = 30
+	}
+
+	// write left border
+	if p.cc[0] < widthLeft {
+		widthLeft -= p.write(state, 0, p.cc[0])
+	} else {
+		p.write(state, 0, widthLeft)
+		return p.buf.String()
+	}
+
+	// check right border size
+	if p.cc[4] < widthLeft {
+		// write later
+		widthLeft -= p.cc[4]
+	} else {
+		p.write(state, 4, widthLeft)
+		return p.buf.String()
+	}
+
+	var curCount int
+
+	if total > 0 {
+		// calculate count of current space
+		curCount = int(math.Ceil((float64(value) / float64(total)) * float64(widthLeft)))
+	}
+
+	// write bar
+	if total == value && state.IsFinished() {
+		widthLeft -= p.write(state, 1, curCount)
+	} else if toWrite := curCount - p.cc[2]; toWrite > 0 {
+		widthLeft -= p.write(state, 1, toWrite)
+		widthLeft -= p.write(state, 2, p.cc[2])
+	} else if curCount > 0 {
+		widthLeft -= p.write(state, 2, curCount)
+	}
+	if widthLeft > 0 {
+		widthLeft -= p.write(state, 3, widthLeft)
+	}
+	// write right border
+	p.write(state, 4, p.cc[4])
+	// cut result and return string
+	return p.buf.String()
+}
+
+// ElementRemainingTime calculates remaining time based on speed (EWMA)
+// Optionally can take up to three string arguments.
+// First string will be used as value for format time duration string, default is "%s".
+// Second string will be used when bar finished and value indicates elapsed time, default is "%s"
+// Third string will be used when value not available, default is "?"
+// In template use as follows: {{rtime .}} or {{rtime . "%s remain"}} or {{rtime . "%s remain" "%s total" "???"}}
+var ElementRemainingTime ElementFunc = func(state *State, args ...string) string {
+	var rts string
+	sp := getSpeedObj(state).value(state)
+	if !state.IsFinished() {
+		if sp > 0 {
+			remain := float64(state.Total() - state.Value())
+			remainDur := time.Duration(remain/sp) * time.Second
+			rts = remainDur.String()
+		} else {
+			return argsHelper(args).getOr(2, "?")
+		}
+	} else {
+		rts = state.Time().Truncate(time.Second).Sub(state.StartTime().Truncate(time.Second)).String()
+		return fmt.Sprintf(argsHelper(args).getOr(1, "%s"), rts)
+	}
+	return fmt.Sprintf(argsHelper(args).getOr(0, "%s"), rts)
+}
+
+// ElementElapsedTime shows elapsed time
+// Optionally can take one argument - it's the format for the time string.
+// In template use as follows: {{etime .}} or {{etime . "%s elapsed"}}
+var ElementElapsedTime ElementFunc = func(state *State, args ...string) string {
+	etm := state.Time().Truncate(time.Second).Sub(state.StartTime().Truncate(time.Second))
+	return fmt.Sprintf(argsHelper(args).getOr(0, "%s"), etm.String())
+}
+
+// ElementString gets a value from the bar by the given key and prints it
+// bar.Set("myKey", "string to print")
+// In template use as follows: {{string . "myKey"}}
+var ElementString ElementFunc = func(state *State, args ...string) string {
+	if len(args) == 0 {
+		return ""
+	}
+	v := state.Get(args[0])
+	if v == nil {
+		return ""
+	}
+	return fmt.Sprint(v)
+}
+
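+// For illustration only (not part of the upstream file), the elements above are
+// combined through a bar template; an assumed, but typical, template string is:
+//
+//	`{{counters . }} {{bar . }} {{percent . }} {{rtime . "ETA %s"}}`
+
+// ElementCycle returns the next argument for every call
+// In template use as follows: {{cycle . "1" "2" "3"}}
+// Or mix with other elements: {{ bar . "" "" (cycle . 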
"↖" "↗" "↘" "↙" )}} +var ElementCycle ElementFunc = func(state *State, args ...string) string { + if len(args) == 0 { + return "" + } + n, _ := state.Get(cycleObj).(int) + if n >= len(args) { + n = 0 + } + state.Set(cycleObj, n+1) + return args[n] +} diff --git a/vendor/github.com/cheggaaa/pb/v3/go.mod b/vendor/github.com/cheggaaa/pb/v3/go.mod new file mode 100644 index 000000000..666c86bc6 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/go.mod @@ -0,0 +1,11 @@ +module github.com/cheggaaa/pb/v3 + +require ( + github.com/VividCortex/ewma v1.1.1 + github.com/fatih/color v1.7.0 + github.com/mattn/go-colorable v0.1.2 + github.com/mattn/go-isatty v0.0.12 + github.com/mattn/go-runewidth v0.0.7 +) + +go 1.12 diff --git a/vendor/github.com/cheggaaa/pb/v3/go.sum b/vendor/github.com/cheggaaa/pb/v3/go.sum new file mode 100644 index 000000000..71cb18331 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/go.sum @@ -0,0 +1,21 @@ +github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= +github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 h1:ZBzSG/7F4eNKz2L3GE9o300RX0Az1Bw5HF7PDraD+qU= +golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/cheggaaa/pb/v3/io.go b/vendor/github.com/cheggaaa/pb/v3/io.go new file mode 100644 index 000000000..6ad5abc24 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/io.go @@ -0,0 +1,49 @@ +package pb + +import ( + "io" +) + +// Reader it's a wrapper for given reader, but with progress handle +type Reader struct { + io.Reader + bar *ProgressBar +} + +// Read reads bytes from wrapped reader and add amount of bytes to progress bar +func (r *Reader) Read(p []byte) (n int, err error) { + n, err = r.Reader.Read(p) + r.bar.Add(n) + return +} + +// Close the wrapped reader when it implements io.Closer +func (r *Reader) Close() (err 
error) {
+	r.bar.Finish()
+	if closer, ok := r.Reader.(io.Closer); ok {
+		return closer.Close()
+	}
+	return
+}
+
+// Writer is a wrapper for the given writer that also updates a progress bar
+type Writer struct {
+	io.Writer
+	bar *ProgressBar
+}
+
+// Write writes bytes to the wrapped writer and adds the amount written to the progress bar
+func (r *Writer) Write(p []byte) (n int, err error) {
+	n, err = r.Writer.Write(p)
+	r.bar.Add(n)
+	return
+}
+
+// Close closes the wrapped writer when it implements io.Closer
+func (r *Writer) Close() (err error) {
+	r.bar.Finish()
+	if closer, ok := r.Writer.(io.Closer); ok {
+		return closer.Close()
+	}
+	return
+}
diff --git a/vendor/github.com/cheggaaa/pb/v3/pb.go b/vendor/github.com/cheggaaa/pb/v3/pb.go
new file mode 100644
index 000000000..17f3750be
--- /dev/null
+++ b/vendor/github.com/cheggaaa/pb/v3/pb.go
@@ -0,0 +1,566 @@
+package pb
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"text/template"
+	"time"
+
+	"github.com/mattn/go-colorable"
+	"github.com/mattn/go-isatty"
+
+	"github.com/cheggaaa/pb/v3/termutil"
+)
+
+// Version of the ProgressBar library
+const Version = "3.0.5"
+
+type key int
+
+const (
+	// Bytes means we're working with byte sizes. Numbers will print as KiB, MiB, etc
+	// bar.Set(pb.Bytes, true)
+	Bytes key = 1 << iota
+
+	// SIBytesPrefix means we use SI bytes prefix names (kB, MB, etc) instead of IEC prefix names (KiB, MiB, etc)
+	SIBytesPrefix
+
+	// Terminal means we will print to a terminal and can use ANSI escape sequences
+	// Also we will try to use the terminal width
+	Terminal
+
+	// Static means the progress bar will not update automatically
+	Static
+
+	// ReturnSymbol - by default in terminal mode it's '\r'
+	ReturnSymbol
+
+	// Color is true by default when the output is a tty, but you can set it to false to disable colors
+	Color
+)
+
+const (
+	defaultBarWidth    = 100
+	defaultRefreshRate = time.Millisecond * 200
+)
+
+// New creates a new ProgressBar object
+func New(total int) *ProgressBar {
+	return New64(int64(total))
+}
+
+// New64 creates a new ProgressBar object using int64 as the total
+func New64(total int64) *ProgressBar {
+	pb := new(ProgressBar)
+	return pb.SetTotal(total)
+}
+
+// StartNew starts a new ProgressBar with the Default template
+func StartNew(total int) *ProgressBar {
+	return New(total).Start()
+}
+
+// Start64 starts a new ProgressBar with the Default template, using int64 as the total. 
+func Start64(total int64) *ProgressBar {
+	return New64(total).Start()
+}
+
+var (
+	terminalWidth    = termutil.TerminalWidth
+	isTerminal       = isatty.IsTerminal
+	isCygwinTerminal = isatty.IsCygwinTerminal
+)
+
+// ProgressBar is the main progress bar object
+type ProgressBar struct {
+	current, total int64
+	width          int
+	maxWidth       int
+	mu             sync.RWMutex
+	rm             sync.Mutex
+	vars           map[interface{}]interface{}
+	elements       map[string]Element
+	output         io.Writer
+	coutput        io.Writer
+	nocoutput      io.Writer
+	startTime      time.Time
+	refreshRate    time.Duration
+	tmpl           *template.Template
+	state          *State
+	buf            *bytes.Buffer
+	ticker         *time.Ticker
+	finish         chan struct{}
+	finished       bool
+	configured     bool
+	err            error
+}
+
+func (pb *ProgressBar) configure() {
+	if pb.configured {
+		return
+	}
+	pb.configured = true
+
+	if pb.vars == nil {
+		pb.vars = make(map[interface{}]interface{})
+	}
+	if pb.output == nil {
+		pb.output = os.Stderr
+	}
+
+	if pb.tmpl == nil {
+		pb.tmpl, pb.err = getTemplate(string(Default))
+		if pb.err != nil {
+			return
+		}
+	}
+	if pb.vars[Terminal] == nil {
+		if f, ok := pb.output.(*os.File); ok {
+			if isTerminal(f.Fd()) || isCygwinTerminal(f.Fd()) {
+				pb.vars[Terminal] = true
+			}
+		}
+	}
+	if pb.vars[ReturnSymbol] == nil {
+		if tm, ok := pb.vars[Terminal].(bool); ok && tm {
+			pb.vars[ReturnSymbol] = "\r"
+		}
+	}
+	if pb.vars[Color] == nil {
+		if tm, ok := pb.vars[Terminal].(bool); ok && tm {
+			pb.vars[Color] = true
+		}
+	}
+	if pb.refreshRate == 0 {
+		pb.refreshRate = defaultRefreshRate
+	}
+	if f, ok := pb.output.(*os.File); ok {
+		pb.coutput = colorable.NewColorable(f)
+	} else {
+		pb.coutput = pb.output
+	}
+	pb.nocoutput = colorable.NewNonColorable(pb.output)
+}
+
+// Start starts the bar
+func (pb *ProgressBar) Start() *ProgressBar {
+	pb.mu.Lock()
+	defer pb.mu.Unlock()
+	if pb.finish != nil {
+		return pb
+	}
+	pb.configure()
+	pb.finished = false
+	pb.state = nil
+	pb.startTime = time.Now()
+	if st, ok := pb.vars[Static].(bool); ok && st {
+		return pb
+	}
+	pb.finish = make(chan struct{})
+	pb.ticker = time.NewTicker(pb.refreshRate)
+	go pb.writer(pb.finish)
+	return pb
+}
+
+func (pb *ProgressBar) writer(finish chan struct{}) {
+	for {
+		select {
+		case <-pb.ticker.C:
+			pb.write(false)
+		case <-finish:
+			pb.ticker.Stop()
+			pb.write(true)
+			finish <- struct{}{}
+			return
+		}
+	}
+}
+
+// Write performs a write to the output
+func (pb *ProgressBar) Write() *ProgressBar {
+	pb.mu.RLock()
+	finished := pb.finished
+	pb.mu.RUnlock()
+	pb.write(finished)
+	return pb
+}
+
+func (pb *ProgressBar) write(finish bool) {
+	result, width := pb.render()
+	if pb.Err() != nil {
+		return
+	}
+	if pb.GetBool(Terminal) {
+		if r := (width - CellCount(result)); r > 0 {
+			result += strings.Repeat(" ", r)
+		}
+	}
+	if ret, ok := pb.Get(ReturnSymbol).(string); ok {
+		result = ret + result
+		if finish && ret == "\r" {
+			result += "\n"
+		}
+	}
+	if pb.GetBool(Color) {
+		pb.coutput.Write([]byte(result))
+	} else {
+		pb.nocoutput.Write([]byte(result))
+	}
+}
+
+// Total returns the current total bar value
+func (pb *ProgressBar) Total() int64 {
+	return atomic.LoadInt64(&pb.total)
+}
+
+// SetTotal sets the total bar value
+func (pb *ProgressBar) SetTotal(value int64) *ProgressBar {
+	atomic.StoreInt64(&pb.total, value)
+	return pb
+}
+
+// SetCurrent sets the current bar value
+func (pb *ProgressBar) SetCurrent(value int64) *ProgressBar {
+	atomic.StoreInt64(&pb.current, value)
+	return pb
+}
+
+// Current returns the current bar value
+func (pb *ProgressBar) Current() int64 {
+	return atomic.LoadInt64(&pb.current)
+}
+
+// Add64 atomically adds the 
given int64 value to the bar value
+func (pb *ProgressBar) Add64(value int64) *ProgressBar {
+	atomic.AddInt64(&pb.current, value)
+	return pb
+}
+
+// Add adds the given int value to the bar value
+func (pb *ProgressBar) Add(value int) *ProgressBar {
+	return pb.Add64(int64(value))
+}
+
+// Increment atomically increments the progress
+func (pb *ProgressBar) Increment() *ProgressBar {
+	return pb.Add64(1)
+}
+
+// Set sets any value by any key
+func (pb *ProgressBar) Set(key, value interface{}) *ProgressBar {
+	pb.mu.Lock()
+	defer pb.mu.Unlock()
+	if pb.vars == nil {
+		pb.vars = make(map[interface{}]interface{})
+	}
+	pb.vars[key] = value
+	return pb
+}
+
+// Get returns the value for the given key
+func (pb *ProgressBar) Get(key interface{}) interface{} {
+	pb.mu.RLock()
+	defer pb.mu.RUnlock()
+	if pb.vars == nil {
+		return nil
+	}
+	return pb.vars[key]
+}
+
+// GetBool returns the value for the given key converted to a boolean.
+// If the value is not set or is not a boolean, it returns false.
+func (pb *ProgressBar) GetBool(key interface{}) bool {
+	if v, ok := pb.Get(key).(bool); ok {
+		return v
+	}
+	return false
+}
+
+// SetWidth sets the bar width.
+// When the given value is <= 0, the terminal width (if available) or the default value is used.
+func (pb *ProgressBar) SetWidth(width int) *ProgressBar {
+	pb.mu.Lock()
+	pb.width = width
+	pb.mu.Unlock()
+	return pb
+}
+
+// SetMaxWidth sets the maximum bar width.
+// When the given value is <= 0, the terminal width (if available) or the default value is used.
+func (pb *ProgressBar) SetMaxWidth(maxWidth int) *ProgressBar {
+	pb.mu.Lock()
+	pb.maxWidth = maxWidth
+	pb.mu.Unlock()
+	return pb
+}
+
+// Width returns the bar width.
+// It's the current terminal width or the value set via SetWidth.
+func (pb *ProgressBar) Width() (width int) {
+	defer func() {
+		if r := recover(); r != nil {
+			width = defaultBarWidth
+		}
+	}()
+	pb.mu.RLock()
+	width = pb.width
+	maxWidth := pb.maxWidth
+	pb.mu.RUnlock()
+	if width <= 0 {
+		var err error
+		if width, err = terminalWidth(); err != nil {
+			return defaultBarWidth
+		}
+	}
+	if maxWidth > 0 && width > maxWidth {
+		width = maxWidth
+	}
+	return
+}
+
+// SetRefreshRate sets the refresh rate of the bar; values <= 0 are ignored
+func (pb *ProgressBar) SetRefreshRate(dur time.Duration) *ProgressBar {
+	pb.mu.Lock()
+	if dur > 0 {
+		pb.refreshRate = dur
+	}
+	pb.mu.Unlock()
+	return pb
+}
+
+// SetWriter sets the io.Writer. 
The bar will write to this writer.
+// By default this is os.Stderr
+func (pb *ProgressBar) SetWriter(w io.Writer) *ProgressBar {
+	pb.mu.Lock()
+	pb.output = w
+	pb.configured = false
+	pb.configure()
+	pb.mu.Unlock()
+	return pb
+}
+
+// StartTime returns the time when the bar was started
+func (pb *ProgressBar) StartTime() time.Time {
+	pb.mu.RLock()
+	defer pb.mu.RUnlock()
+	return pb.startTime
+}
+
+// Format converts an int64 to a string according to the current settings
+func (pb *ProgressBar) Format(v int64) string {
+	if pb.GetBool(Bytes) {
+		return formatBytes(v, pb.GetBool(SIBytesPrefix))
+	}
+	return strconv.FormatInt(v, 10)
+}
+
+// Finish stops the bar
+func (pb *ProgressBar) Finish() *ProgressBar {
+	pb.mu.Lock()
+	if pb.finished {
+		pb.mu.Unlock()
+		return pb
+	}
+	finishChan := pb.finish
+	pb.finished = true
+	pb.mu.Unlock()
+	if finishChan != nil {
+		finishChan <- struct{}{}
+		<-finishChan
+		pb.mu.Lock()
+		pb.finish = nil
+		pb.mu.Unlock()
+	}
+	return pb
+}
+
+// IsStarted reports whether the progress bar is started
+func (pb *ProgressBar) IsStarted() bool {
+	pb.mu.RLock()
+	defer pb.mu.RUnlock()
+	return pb.finish != nil
+}
+
+// SetTemplateString sets the ProgressBar template string and parses it
+func (pb *ProgressBar) SetTemplateString(tmpl string) *ProgressBar {
+	pb.mu.Lock()
+	defer pb.mu.Unlock()
+	pb.tmpl, pb.err = getTemplate(tmpl)
+	return pb
+}
+
+// SetTemplate sets the ProgressBarTemplate and parses it
+func (pb *ProgressBar) SetTemplate(tmpl ProgressBarTemplate) *ProgressBar {
+	return pb.SetTemplateString(string(tmpl))
+}
+
+// NewProxyReader creates a wrapper for the given reader that updates the progress bar.
+// It takes an io.Reader or io.ReadCloser.
+// Also, it automatically switches the progress bar to handle units as bytes
+func (pb *ProgressBar) NewProxyReader(r io.Reader) *Reader {
+	pb.Set(Bytes, true)
+	return &Reader{r, pb}
+}
+
+// NewProxyWriter creates a wrapper for the given writer that updates the progress bar.
+// It takes an io.Writer or io.WriteCloser.
+// Also, it automatically switches the progress bar to handle units as bytes
+func (pb *ProgressBar) NewProxyWriter(r io.Writer) *Writer {
+	pb.Set(Bytes, true)
+	return &Writer{r, pb}
+}
+
+func (pb *ProgressBar) render() (result string, width int) {
+	defer func() {
+		if r := recover(); r != nil {
+			pb.SetErr(fmt.Errorf("render panic: %v", r))
+		}
+	}()
+	pb.rm.Lock()
+	defer pb.rm.Unlock()
+	pb.mu.Lock()
+	pb.configure()
+	if pb.state == nil {
+		pb.state = &State{ProgressBar: pb}
+		pb.buf = bytes.NewBuffer(nil)
+	}
+	if pb.startTime.IsZero() {
+		pb.startTime = time.Now()
+	}
+	pb.state.id++
+	pb.state.finished = pb.finished
+	pb.state.time = time.Now()
+	pb.mu.Unlock()
+
+	pb.state.width = pb.Width()
+	width = pb.state.width
+	pb.state.total = pb.Total()
+	pb.state.current = pb.Current()
+	pb.buf.Reset()
+
+	if e := pb.tmpl.Execute(pb.buf, pb.state); e != nil {
+		pb.SetErr(e)
+		return "", 0
+	}
+
+	result = pb.buf.String()
+
+	aec := len(pb.state.recalc)
+	if aec == 0 {
+		// no adaptive elements
+		return
+	}
+
+	staticWidth := CellCount(result) - (aec * adElPlaceholderLen)
+
+	if pb.state.Width()-staticWidth <= 0 {
+		result = strings.Replace(result, adElPlaceholder, "", -1)
+		result = StripString(result, pb.state.Width())
+	} else {
+		pb.state.adaptiveElWidth = (width - staticWidth) / aec
+		for _, el := range pb.state.recalc {
+			result = strings.Replace(result, adElPlaceholder, el.ProgressElement(pb.state), 1)
+		}
+	}
+	pb.state.recalc = pb.state.recalc[:0]
+	return
+}
+
+// SetErr sets an error on the ProgressBar.
+// The error will be available via Err()
+func (pb 
*ProgressBar) SetErr(err error) *ProgressBar {
+	pb.mu.Lock()
+	pb.err = err
+	pb.mu.Unlock()
+	return pb
+}
+
+// Err returns the last error, if any.
+// It is nil when everything is ok.
+// May contain template.Execute errors
+func (pb *ProgressBar) Err() error {
+	pb.mu.RLock()
+	defer pb.mu.RUnlock()
+	return pb.err
+}
+
+// String returns the current string representation of the ProgressBar
+func (pb *ProgressBar) String() string {
+	res, _ := pb.render()
+	return res
+}
+
+// ProgressElement implements the Element interface
+func (pb *ProgressBar) ProgressElement(s *State, args ...string) string {
+	if s.IsAdaptiveWidth() {
+		pb.SetWidth(s.AdaptiveElWidth())
+	}
+	return pb.String()
+}
+
+// State represents the current state of the bar.
+// It is needed by bar elements.
+type State struct {
+	*ProgressBar
+
+	id                     uint64
+	total, current         int64
+	width, adaptiveElWidth int
+	finished, adaptive     bool
+	time                   time.Time
+
+	recalc []Element
+}
+
+// Id returns the current state identifier
+// - incremental
+// - starts with 1
+// - resets after finish/start
+func (s *State) Id() uint64 {
+	return s.id
+}
+
+// Total returns the bar's int64 total
+func (s *State) Total() int64 {
+	return s.total
+}
+
+// Value returns the current value
+func (s *State) Value() int64 {
+	return s.current
+}
+
+// Width returns the width of the bar
+func (s *State) Width() int {
+	return s.width
+}
+
+// AdaptiveElWidth - adaptive elements must return a string with the given cell count (when AdaptiveElWidth > 0)
+func (s *State) AdaptiveElWidth() int {
+	return s.adaptiveElWidth
+}
+
+// IsAdaptiveWidth returns true when the element must be shown as adaptive
+func (s *State) IsAdaptiveWidth() bool {
+	return s.adaptive
+}
+
+// IsFinished returns true when the bar is finished
+func (s *State) IsFinished() bool {
+	return s.finished
+}
+
+// IsFirst returns true only on the first render
+func (s *State) IsFirst() bool {
+	return s.id == 1
+}
+
+// Time returns the time when the state was created
+func (s *State) Time() time.Time {
+	return s.time
+}
diff --git a/vendor/github.com/cheggaaa/pb/v3/preset.go b/vendor/github.com/cheggaaa/pb/v3/preset.go
new file mode 100644
index 000000000..f5e2fff57
--- /dev/null
+++ b/vendor/github.com/cheggaaa/pb/v3/preset.go
@@ -0,0 +1,15 @@
+package pb
+
+var (
+	// Full - preset with all default available elements
+	// Example: 'Prefix 20/100 [-->______] 20% 1 p/s ETA 1m Suffix'
+	Full ProgressBarTemplate = `{{string . "prefix"}}{{counters . }} {{bar . }} {{percent . }} {{speed . }} {{rtime . "ETA %s"}}{{string . "suffix"}}`
+
+	// Default - preset like Full but without the remaining-time (ETA) element
+	// Example: 'Prefix 20/100 [-->______] 20% 1 p/s Suffix'
+	Default ProgressBarTemplate = `{{string . "prefix"}}{{counters . }} {{bar . }} {{percent . }} {{speed . }}{{string . "suffix"}}`
+
+	// Simple - preset without speed and any timers. Only counters, bar and percents
+	// Example: 'Prefix 20/100 [-->______] 20% Suffix'
+	Simple ProgressBarTemplate = `{{string . "prefix"}}{{counters . }} {{bar . }} {{percent . }}{{string . 
"suffix"}}` +) diff --git a/vendor/github.com/cheggaaa/pb/v3/speed.go b/vendor/github.com/cheggaaa/pb/v3/speed.go new file mode 100644 index 000000000..17a6b1bfa --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/speed.go @@ -0,0 +1,83 @@ +package pb + +import ( + "fmt" + "math" + "time" + + "github.com/VividCortex/ewma" +) + +var speedAddLimit = time.Second / 2 + +type speed struct { + ewma ewma.MovingAverage + lastStateId uint64 + prevValue, startValue int64 + prevTime, startTime time.Time +} + +func (s *speed) value(state *State) float64 { + if s.ewma == nil { + s.ewma = ewma.NewMovingAverage() + } + if state.IsFirst() || state.Id() < s.lastStateId { + s.reset(state) + return 0 + } + if state.Id() == s.lastStateId { + return s.ewma.Value() + } + if state.IsFinished() { + return s.absValue(state) + } + dur := state.Time().Sub(s.prevTime) + if dur < speedAddLimit { + return s.ewma.Value() + } + diff := math.Abs(float64(state.Value() - s.prevValue)) + lastSpeed := diff / dur.Seconds() + s.prevTime = state.Time() + s.prevValue = state.Value() + s.lastStateId = state.Id() + s.ewma.Add(lastSpeed) + return s.ewma.Value() +} + +func (s *speed) reset(state *State) { + s.lastStateId = state.Id() + s.startTime = state.Time() + s.prevTime = state.Time() + s.startValue = state.Value() + s.prevValue = state.Value() + s.ewma = ewma.NewMovingAverage() +} + +func (s *speed) absValue(state *State) float64 { + if dur := state.Time().Sub(s.startTime); dur > 0 { + return float64(state.Value()) / dur.Seconds() + } + return 0 +} + +func getSpeedObj(state *State) (s *speed) { + if sObj, ok := state.Get(speedObj).(*speed); ok { + return sObj + } + s = new(speed) + state.Set(speedObj, s) + return +} + +// ElementSpeed calculates current speed by EWMA +// Optionally can take one or two string arguments. +// First string will be used as value for format speed, default is "%s p/s". +// Second string will be used when speed not available, default is "? p/s" +// In template use as follows: {{speed .}} or {{speed . "%s per second"}} or {{speed . "%s ps" "..."} +var ElementSpeed ElementFunc = func(state *State, args ...string) string { + sp := getSpeedObj(state).value(state) + if sp == 0 { + return argsHelper(args).getNotEmptyOr(1, "? 
p/s") + } + return fmt.Sprintf(argsHelper(args).getNotEmptyOr(0, "%s p/s"), state.Format(int64(round(sp)))) +} diff --git a/vendor/github.com/cheggaaa/pb/v3/template.go b/vendor/github.com/cheggaaa/pb/v3/template.go new file mode 100644 index 000000000..ecfc27112 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/template.go @@ -0,0 +1,88 @@ +package pb + +import ( + "math/rand" + "sync" + "text/template" + + "github.com/fatih/color" +) + +// ProgressBarTemplate that template string +type ProgressBarTemplate string + +// New creates new bar from template +func (pbt ProgressBarTemplate) New(total int) *ProgressBar { + return New(total).SetTemplate(pbt) +} + +// Start64 create and start new bar with given int64 total value +func (pbt ProgressBarTemplate) Start64(total int64) *ProgressBar { + return New64(total).SetTemplate(pbt).Start() +} + +// Start create and start new bar with given int total value +func (pbt ProgressBarTemplate) Start(total int) *ProgressBar { + return pbt.Start64(int64(total)) +} + +var templateCacheMu sync.Mutex +var templateCache = make(map[string]*template.Template) + +var defaultTemplateFuncs = template.FuncMap{ + // colors + "black": color.New(color.FgBlack).SprintFunc(), + "red": color.New(color.FgRed).SprintFunc(), + "green": color.New(color.FgGreen).SprintFunc(), + "yellow": color.New(color.FgYellow).SprintFunc(), + "blue": color.New(color.FgBlue).SprintFunc(), + "magenta": color.New(color.FgMagenta).SprintFunc(), + "cyan": color.New(color.FgCyan).SprintFunc(), + "white": color.New(color.FgWhite).SprintFunc(), + "resetcolor": color.New(color.Reset).SprintFunc(), + "rndcolor": rndcolor, + "rnd": rnd, +} + +func getTemplate(tmpl string) (t *template.Template, err error) { + templateCacheMu.Lock() + defer templateCacheMu.Unlock() + t = templateCache[tmpl] + if t != nil { + // found in cache + return + } + t = template.New("") + fillTemplateFuncs(t) + _, err = t.Parse(tmpl) + if err != nil { + t = nil + return + } + templateCache[tmpl] = t + return +} + +func fillTemplateFuncs(t *template.Template) { + t.Funcs(defaultTemplateFuncs) + emf := make(template.FuncMap) + elementsM.Lock() + for k, v := range elements { + emf[k] = v + } + elementsM.Unlock() + t.Funcs(emf) + return +} + +func rndcolor(s string) string { + c := rand.Intn(int(color.FgWhite-color.FgBlack)) + int(color.FgBlack) + return color.New(color.Attribute(c)).Sprint(s) +} + +func rnd(args ...string) string { + if len(args) == 0 { + return "" + } + return args[rand.Intn(len(args))] +} diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term.go new file mode 100644 index 000000000..02b52797e --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term.go @@ -0,0 +1,56 @@ +package termutil + +import ( + "errors" + "os" + "os/signal" + "sync" +) + +var echoLocked bool +var echoLockMutex sync.Mutex +var errLocked = errors.New("terminal locked") + +// RawModeOn switches terminal to raw mode +func RawModeOn() (quit chan struct{}, err error) { + echoLockMutex.Lock() + defer echoLockMutex.Unlock() + if echoLocked { + err = errLocked + return + } + if err = lockEcho(); err != nil { + return + } + echoLocked = true + quit = make(chan struct{}, 1) + go catchTerminate(quit) + return +} + +// RawModeOff restore previous terminal state +func RawModeOff() (err error) { + echoLockMutex.Lock() + defer echoLockMutex.Unlock() + if !echoLocked { + return + } + if err = unlockEcho(); err != nil { + return + } + echoLocked = false + return +} + +// listen exit 
signals and restore the terminal state
+func catchTerminate(quit chan struct{}) {
+	sig := make(chan os.Signal, 1)
+	signal.Notify(sig, unlockSignals...)
+	defer signal.Stop(sig)
+	select {
+	case <-quit:
+		RawModeOff()
+	case <-sig:
+		RawModeOff()
+	}
+}
diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term_appengine.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term_appengine.go
new file mode 100644
index 000000000..4b7b20e6b
--- /dev/null
+++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term_appengine.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package termutil
+
+import "errors"
+
+// TerminalWidth returns the width of the terminal, which is not supported
+// and always fails on App Engine classic, which is a sandboxed PaaS.
+func TerminalWidth() (int, error) {
+	return 0, errors.New("Not supported")
+}
diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term_bsd.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term_bsd.go
new file mode 100644
index 000000000..272659a12
--- /dev/null
+++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term_bsd.go
@@ -0,0 +1,9 @@
+// +build darwin freebsd netbsd openbsd dragonfly
+// +build !appengine
+
+package termutil
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+const ioctlWriteTermios = syscall.TIOCSETA
diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term_linux.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term_linux.go
new file mode 100644
index 000000000..2f59e53e1
--- /dev/null
+++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term_linux.go
@@ -0,0 +1,7 @@
+// +build linux
+// +build !appengine
+
+package termutil
+
+const ioctlReadTermios = 0x5401 // syscall.TCGETS
+const ioctlWriteTermios = 0x5402 // syscall.TCSETS
diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term_nix.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term_nix.go
new file mode 100644
index 000000000..14277e71f
--- /dev/null
+++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term_nix.go
@@ -0,0 +1,8 @@
+// +build linux darwin freebsd netbsd openbsd dragonfly
+// +build !appengine
+
+package termutil
+
+import "syscall"
+
+const sysIoctl = syscall.SYS_IOCTL
diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term_plan9.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term_plan9.go
new file mode 100644
index 000000000..f3934c6ec
--- /dev/null
+++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term_plan9.go
@@ -0,0 +1,50 @@
+package termutil
+
+import (
+	"errors"
+	"os"
+	"syscall"
+)
+
+var (
+	consctl *os.File
+
+	// Plan 9 doesn't have syscall.SIGQUIT
+	unlockSignals = []os.Signal{
+		os.Interrupt, syscall.SIGTERM, syscall.SIGKILL,
+	}
+)
+
+// TerminalWidth returns the width of the terminal. 
+func TerminalWidth() (int, error) { + return 0, errors.New("Not supported") +} + +func lockEcho() error { + if consctl != nil { + return errors.New("consctl already open") + } + var err error + consctl, err = os.OpenFile("/dev/consctl", os.O_WRONLY, 0) + if err != nil { + return err + } + _, err = consctl.WriteString("rawon") + if err != nil { + consctl.Close() + consctl = nil + return err + } + return nil +} + +func unlockEcho() error { + if consctl == nil { + return nil + } + if err := consctl.Close(); err != nil { + return err + } + consctl = nil + return nil +} diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term_solaris.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term_solaris.go new file mode 100644 index 000000000..fc96c2b7f --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term_solaris.go @@ -0,0 +1,8 @@ +// +build solaris +// +build !appengine + +package termutil + +const ioctlReadTermios = 0x5401 // syscall.TCGETS +const ioctlWriteTermios = 0x5402 // syscall.TCSETS +const sysIoctl = 54 diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term_win.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term_win.go new file mode 100644 index 000000000..c867d2722 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term_win.go @@ -0,0 +1,155 @@ +// +build windows + +package termutil + +import ( + "fmt" + "os" + "os/exec" + "strconv" + "syscall" + "unsafe" +) + +var ( + tty = os.Stdin + + unlockSignals = []os.Signal{ + os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL, + } +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + + // GetConsoleScreenBufferInfo retrieves information about the + // specified console screen buffer. + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + + // GetConsoleMode retrieves the current input mode of a console's + // input buffer or the current output mode of a console screen buffer. + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx + getConsoleMode = kernel32.NewProc("GetConsoleMode") + + // SetConsoleMode sets the input mode of a console's input buffer + // or the output mode of a console screen buffer. + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + setConsoleMode = kernel32.NewProc("SetConsoleMode") + + // SetConsoleCursorPosition sets the cursor position in the + // specified console screen buffer. + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx + setConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + + mingw = isMingw() +) + +type ( + // Defines the coordinates of the upper left and lower right corners + // of a rectangle. + // See + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311(v=vs.85).aspx + smallRect struct { + Left, Top, Right, Bottom int16 + } + + // Defines the coordinates of a character cell in a console screen + // buffer. The origin of the coordinate system (0,0) is at the top, left cell + // of the buffer. + // See + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119(v=vs.85).aspx + coordinates struct { + X, Y int16 + } + + word int16 + + // Contains information about a console screen buffer. 
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx + consoleScreenBufferInfo struct { + dwSize coordinates + dwCursorPosition coordinates + wAttributes word + srWindow smallRect + dwMaximumWindowSize coordinates + } +) + +// TerminalWidth returns width of the terminal. +func TerminalWidth() (width int, err error) { + if mingw { + return termWidthTPut() + } + return termWidthCmd() +} + +func termWidthCmd() (width int, err error) { + var info consoleScreenBufferInfo + _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0) + if e != 0 { + return 0, error(e) + } + return int(info.dwSize.X) - 1, nil +} + +func isMingw() bool { + return os.Getenv("MINGW_PREFIX") != "" || os.Getenv("MSYSTEM") == "MINGW64" +} + +func termWidthTPut() (width int, err error) { + // TODO: maybe anybody knows a better way to get it on mintty... + var res []byte + cmd := exec.Command("tput", "cols") + cmd.Stdin = os.Stdin + if res, err = cmd.CombinedOutput(); err != nil { + return 0, fmt.Errorf("%s: %v", string(res), err) + } + if len(res) > 1 { + res = res[:len(res)-1] + } + return strconv.Atoi(string(res)) +} + +func getCursorPos() (pos coordinates, err error) { + var info consoleScreenBufferInfo + _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0) + if e != 0 { + return info.dwCursorPosition, error(e) + } + return info.dwCursorPosition, nil +} + +func setCursorPos(pos coordinates) error { + _, _, e := syscall.Syscall(setConsoleCursorPosition.Addr(), 2, uintptr(syscall.Stdout), uintptr(uint32(uint16(pos.Y))<<16|uint32(uint16(pos.X))), 0) + if e != 0 { + return error(e) + } + return nil +} + +var oldState word + +func lockEcho() (err error) { + if _, _, e := syscall.Syscall(getConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&oldState)), 0); e != 0 { + err = fmt.Errorf("Can't get terminal settings: %v", e) + return + } + + newState := oldState + const ENABLE_ECHO_INPUT = 0x0004 + const ENABLE_LINE_INPUT = 0x0002 + newState = newState & (^(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT)) + if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(newState), 0); e != 0 { + err = fmt.Errorf("Can't set terminal settings: %v", e) + return + } + return +} + +func unlockEcho() (err error) { + if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(oldState), 0); e != 0 { + err = fmt.Errorf("Can't set terminal settings") + } + return +} diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term_x.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term_x.go new file mode 100644 index 000000000..693775549 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term_x.go @@ -0,0 +1,76 @@ +// +build linux darwin freebsd netbsd openbsd solaris dragonfly +// +build !appengine + +package termutil + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +var ( + tty *os.File + + unlockSignals = []os.Signal{ + os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL, + } +) + +type window struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +func init() { + var err error + tty, err = os.Open("/dev/tty") + if err != nil { + tty = os.Stdin + } +} + +// TerminalWidth returns width of the terminal. 
+func TerminalWidth() (int, error) { + w := new(window) + res, _, err := syscall.Syscall(sysIoctl, + tty.Fd(), + uintptr(syscall.TIOCGWINSZ), + uintptr(unsafe.Pointer(w)), + ) + if int(res) == -1 { + return 0, err + } + return int(w.Col), nil +} + +var oldState syscall.Termios + +func lockEcho() (err error) { + fd := tty.Fd() + if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 { + err = fmt.Errorf("Can't get terminal settings: %v", e) + return + } + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); e != 0 { + err = fmt.Errorf("Can't set terminal settings: %v", e) + return + } + return +} + +func unlockEcho() (err error) { + fd := tty.Fd() + if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 { + err = fmt.Errorf("Can't set terminal settings") + } + return +} diff --git a/vendor/github.com/cheggaaa/pb/v3/util.go b/vendor/github.com/cheggaaa/pb/v3/util.go new file mode 100644 index 000000000..078123420 --- /dev/null +++ b/vendor/github.com/cheggaaa/pb/v3/util.go @@ -0,0 +1,115 @@ +package pb + +import ( + "bytes" + "fmt" + "github.com/mattn/go-runewidth" + "math" + "regexp" + //"unicode/utf8" +) + +const ( + _KiB = 1024 + _MiB = 1048576 + _GiB = 1073741824 + _TiB = 1099511627776 + + _kB = 1e3 + _MB = 1e6 + _GB = 1e9 + _TB = 1e12 +) + +var ctrlFinder = regexp.MustCompile("\x1b\x5b[0-9]+\x6d") + +func CellCount(s string) int { + n := runewidth.StringWidth(s) + for _, sm := range ctrlFinder.FindAllString(s, -1) { + n -= runewidth.StringWidth(sm) + } + return n +} + +func StripString(s string, w int) string { + l := CellCount(s) + if l <= w { + return s + } + var buf = bytes.NewBuffer(make([]byte, 0, len(s))) + StripStringToBuffer(s, w, buf) + return buf.String() +} + +func StripStringToBuffer(s string, w int, buf *bytes.Buffer) { + var seqs = ctrlFinder.FindAllStringIndex(s, -1) +mainloop: + for i, r := range s { + for _, seq := range seqs { + if i >= seq[0] && i < seq[1] { + buf.WriteRune(r) + continue mainloop + } + } + if rw := CellCount(string(r)); rw <= w { + w -= rw + buf.WriteRune(r) + } else { + break + } + } + for w > 0 { + buf.WriteByte(' ') + w-- + } + return +} + +func round(val float64) (newVal float64) { + roundOn := 0.5 + places := 0 + var round float64 + pow := math.Pow(10, float64(places)) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + newVal = round / pow + return +} + +// Convert bytes to human readable string. 
Like a 2 MiB, 64.2 KiB, or 2 MB, 64.2 kB +// if useSIPrefix is set to true +func formatBytes(i int64, useSIPrefix bool) (result string) { + if !useSIPrefix { + switch { + case i >= _TiB: + result = fmt.Sprintf("%.02f TiB", float64(i)/_TiB) + case i >= _GiB: + result = fmt.Sprintf("%.02f GiB", float64(i)/_GiB) + case i >= _MiB: + result = fmt.Sprintf("%.02f MiB", float64(i)/_MiB) + case i >= _KiB: + result = fmt.Sprintf("%.02f KiB", float64(i)/_KiB) + default: + result = fmt.Sprintf("%d B", i) + } + } else { + switch { + case i >= _TB: + result = fmt.Sprintf("%.02f TB", float64(i)/_TB) + case i >= _GB: + result = fmt.Sprintf("%.02f GB", float64(i)/_GB) + case i >= _MB: + result = fmt.Sprintf("%.02f MB", float64(i)/_MB) + case i >= _kB: + result = fmt.Sprintf("%.02f kB", float64(i)/_kB) + default: + result = fmt.Sprintf("%d B", i) + } + } + return +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md new file mode 100644 index 000000000..1cade6cef --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Brian Goff + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go new file mode 100644 index 000000000..b48005673 --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go @@ -0,0 +1,14 @@ +package md2man + +import ( + "github.com/russross/blackfriday/v2" +) + +// Render converts a markdown document into a roff formatted document. +func Render(doc []byte) []byte { + renderer := NewRoffRenderer() + + return blackfriday.Run(doc, + []blackfriday.Option{blackfriday.WithRenderer(renderer), + blackfriday.WithExtensions(renderer.GetExtensions())}...) 
+} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go new file mode 100644 index 000000000..0668a66cf --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -0,0 +1,345 @@ +package md2man + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/russross/blackfriday/v2" +) + +// roffRenderer implements the blackfriday.Renderer interface for creating +// roff format (manpages) from markdown text +type roffRenderer struct { + extensions blackfriday.Extensions + listCounters []int + firstHeader bool + defineTerm bool + listDepth int +} + +const ( + titleHeader = ".TH " + topLevelHeader = "\n\n.SH " + secondLevelHdr = "\n.SH " + otherHeader = "\n.SS " + crTag = "\n" + emphTag = "\\fI" + emphCloseTag = "\\fP" + strongTag = "\\fB" + strongCloseTag = "\\fP" + breakTag = "\n.br\n" + paraTag = "\n.PP\n" + hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" + linkTag = "\n\\[la]" + linkCloseTag = "\\[ra]" + codespanTag = "\\fB\\fC" + codespanCloseTag = "\\fR" + codeTag = "\n.PP\n.RS\n\n.nf\n" + codeCloseTag = "\n.fi\n.RE\n" + quoteTag = "\n.PP\n.RS\n" + quoteCloseTag = "\n.RE\n" + listTag = "\n.RS\n" + listCloseTag = "\n.RE\n" + arglistTag = "\n.TP\n" + tableStart = "\n.TS\nallbox;\n" + tableEnd = ".TE\n" + tableCellStart = "T{\n" + tableCellEnd = "\nT}\n" +) + +// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents +// from markdown +func NewRoffRenderer() *roffRenderer { // nolint: golint + var extensions blackfriday.Extensions + + extensions |= blackfriday.NoIntraEmphasis + extensions |= blackfriday.Tables + extensions |= blackfriday.FencedCode + extensions |= blackfriday.SpaceHeadings + extensions |= blackfriday.Footnotes + extensions |= blackfriday.Titleblock + extensions |= blackfriday.DefinitionLists + return &roffRenderer{ + extensions: extensions, + } +} + +// GetExtensions returns the list of extensions used by this renderer implementation +func (r *roffRenderer) GetExtensions() blackfriday.Extensions { + return r.extensions +} + +// RenderHeader handles outputting the header at document start +func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { + // disable hyphenation + out(w, ".nh\n") +} + +// RenderFooter handles outputting the footer at the document end; the roff +// renderer has no footer information +func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { +} + +// RenderNode is called for each node in a markdown document; based on the node +// type the equivalent roff output is sent to the writer +func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + + var walkAction = blackfriday.GoToNext + + switch node.Type { + case blackfriday.Text: + r.handleText(w, node, entering) + case blackfriday.Softbreak: + out(w, crTag) + case blackfriday.Hardbreak: + out(w, breakTag) + case blackfriday.Emph: + if entering { + out(w, emphTag) + } else { + out(w, emphCloseTag) + } + case blackfriday.Strong: + if entering { + out(w, strongTag) + } else { + out(w, strongCloseTag) + } + case blackfriday.Link: + if !entering { + out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag) + } + case blackfriday.Image: + // ignore images + walkAction = blackfriday.SkipChildren + case blackfriday.Code: + out(w, codespanTag) + escapeSpecialChars(w, node.Literal) + out(w, codespanCloseTag) + case blackfriday.Document: + break + case blackfriday.Paragraph: + // roff .PP markers break lists + if 
r.listDepth > 0 { + return blackfriday.GoToNext + } + if entering { + out(w, paraTag) + } else { + out(w, crTag) + } + case blackfriday.BlockQuote: + if entering { + out(w, quoteTag) + } else { + out(w, quoteCloseTag) + } + case blackfriday.Heading: + r.handleHeading(w, node, entering) + case blackfriday.HorizontalRule: + out(w, hruleTag) + case blackfriday.List: + r.handleList(w, node, entering) + case blackfriday.Item: + r.handleItem(w, node, entering) + case blackfriday.CodeBlock: + out(w, codeTag) + escapeSpecialChars(w, node.Literal) + out(w, codeCloseTag) + case blackfriday.Table: + r.handleTable(w, node, entering) + case blackfriday.TableCell: + r.handleTableCell(w, node, entering) + case blackfriday.TableHead: + case blackfriday.TableBody: + case blackfriday.TableRow: + // no action as cell entries do all the nroff formatting + return blackfriday.GoToNext + default: + fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String()) + } + return walkAction +} + +func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) { + var ( + start, end string + ) + // handle special roff table cell text encapsulation + if node.Parent.Type == blackfriday.TableCell { + if len(node.Literal) > 30 { + start = tableCellStart + end = tableCellEnd + } else { + // end rows that aren't terminated by "tableCellEnd" with a cr if end of row + if node.Parent.Next == nil && !node.Parent.IsHeader { + end = crTag + } + } + } + out(w, start) + escapeSpecialChars(w, node.Literal) + out(w, end) +} + +func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) { + if entering { + switch node.Level { + case 1: + if !r.firstHeader { + out(w, titleHeader) + r.firstHeader = true + break + } + out(w, topLevelHeader) + case 2: + out(w, secondLevelHdr) + default: + out(w, otherHeader) + } + } +} + +func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) { + openTag := listTag + closeTag := listCloseTag + if node.ListFlags&blackfriday.ListTypeDefinition != 0 { + // tags for definition lists handled within Item node + openTag = "" + closeTag = "" + } + if entering { + r.listDepth++ + if node.ListFlags&blackfriday.ListTypeOrdered != 0 { + r.listCounters = append(r.listCounters, 1) + } + out(w, openTag) + } else { + if node.ListFlags&blackfriday.ListTypeOrdered != 0 { + r.listCounters = r.listCounters[:len(r.listCounters)-1] + } + out(w, closeTag) + r.listDepth-- + } +} + +func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) { + if entering { + if node.ListFlags&blackfriday.ListTypeOrdered != 0 { + out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1])) + r.listCounters[len(r.listCounters)-1]++ + } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 { + // state machine for handling terms and following definitions + // since blackfriday does not distinguish them properly, nor + // does it seperate them into separate lists as it should + if !r.defineTerm { + out(w, arglistTag) + r.defineTerm = true + } else { + r.defineTerm = false + } + } else { + out(w, ".IP \\(bu 2\n") + } + } else { + out(w, "\n") + } +} + +func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) { + if entering { + out(w, tableStart) + //call walker to count cells (and rows?) 
so format section can be produced + columns := countColumns(node) + out(w, strings.Repeat("l ", columns)+"\n") + out(w, strings.Repeat("l ", columns)+".\n") + } else { + out(w, tableEnd) + } +} + +func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) { + var ( + start, end string + ) + if node.IsHeader { + start = codespanTag + end = codespanCloseTag + } + if entering { + if node.Prev != nil && node.Prev.Type == blackfriday.TableCell { + out(w, "\t"+start) + } else { + out(w, start) + } + } else { + // need to carriage return if we are at the end of the header row + if node.IsHeader && node.Next == nil { + end = end + crTag + } + out(w, end) + } +} + +// because roff format requires knowing the column count before outputting any table +// data we need to walk a table tree and count the columns +func countColumns(node *blackfriday.Node) int { + var columns int + + node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + switch node.Type { + case blackfriday.TableRow: + if !entering { + return blackfriday.Terminate + } + case blackfriday.TableCell: + if entering { + columns++ + } + default: + } + return blackfriday.GoToNext + }) + return columns +} + +func out(w io.Writer, output string) { + io.WriteString(w, output) // nolint: errcheck +} + +func needsBackslash(c byte) bool { + for _, r := range []byte("-_&\\~") { + if c == r { + return true + } + } + return false +} + +func escapeSpecialChars(w io.Writer, text []byte) { + for i := 0; i < len(text); i++ { + // escape initial apostrophe or period + if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { + out(w, "\\&") + } + + // directly copy normal characters + org := i + + for i < len(text) && !needsBackslash(text[i]) { + i++ + } + if i > org { + w.Write(text[org:i]) // nolint: errcheck + } + + // escape a character + if i >= len(text) { + break + } + + w.Write([]byte{'\\', text[i]}) // nolint: errcheck + } +} diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md new file mode 100644 index 000000000..25fdaf639 --- /dev/null +++ b/vendor/github.com/fatih/color/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
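Reviewer's note: the pb/v3 package vendored above is pulled in as a vmctl dependency, presumably for rendering migration progress. The following is a minimal usage sketch of the APIs defined in pb.go and io.go (New64, Start, NewProxyReader, Finish); it is not code from this changeset, and the input file name is a hypothetical placeholder:

```go
package main

import (
	"io"
	"io/ioutil"
	"log"
	"os"

	"github.com/cheggaaa/pb/v3"
)

func main() {
	// Open a hypothetical input file; any io.Reader works here.
	f, err := os.Open("metrics.dump")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	st, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}

	// Start a bar sized to the file. NewProxyReader switches the bar
	// into byte mode (pb.Bytes), so counters render as KiB/MiB.
	bar := pb.New64(st.Size()).Start()
	reader := bar.NewProxyReader(f)

	// Each Read on the proxy advances the bar; Finish is idempotent.
	if _, err := io.Copy(ioutil.Discard, reader); err != nil {
		log.Fatal(err)
	}
	bar.Finish()
}
```

The proxy-reader pattern is why io.go wraps both io.Reader and io.Writer: the bar updates as a side effect of normal I/O, so calling code needs no explicit progress bookkeeping.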
diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md new file mode 100644 index 000000000..d62e4024a --- /dev/null +++ b/vendor/github.com/fatih/color/README.md @@ -0,0 +1,173 @@ +# color [![](https://github.com/fatih/color/workflows/build/badge.svg)](https://github.com/fatih/color/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/fatih/color)](https://pkg.go.dev/github.com/fatih/color) + +Color lets you use colorized outputs in terms of [ANSI Escape +Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It +has support for Windows too! The API can be used in several ways, pick one that +suits you. + +![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg) + + +## Install + +```bash +go get github.com/fatih/color +``` + +## Examples + +### Standard colors + +```go +// Print with default helper functions +color.Cyan("Prints text in cyan.") + +// A newline will be appended automatically +color.Blue("Prints %s in blue.", "text") + +// These are using the default foreground colors +color.Red("We have red") +color.Magenta("And many others ..") + +``` + +### Mix and reuse colors + +```go +// Create a new color object +c := color.New(color.FgCyan).Add(color.Underline) +c.Println("Prints cyan text with an underline.") + +// Or just add them to New() +d := color.New(color.FgCyan, color.Bold) +d.Printf("This prints bold cyan %s\n", "too!.") + +// Mix up foreground and background colors, create new mixes! +red := color.New(color.FgRed) + +boldRed := red.Add(color.Bold) +boldRed.Println("This will print text in bold red.") + +whiteBackground := red.Add(color.BgWhite) +whiteBackground.Println("Red text with white background.") +``` + +### Use your own output (io.Writer) + +```go +// Use your own io.Writer output +color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + +blue := color.New(color.FgBlue) +blue.Fprint(writer, "This will print text in blue.") +``` + +### Custom print functions (PrintFunc) + +```go +// Create a custom print function for convenience +red := color.New(color.FgRed).PrintfFunc() +red("Warning") +red("Error: %s", err) + +// Mix up multiple attributes +notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() +notice("Don't forget this...") +``` + +### Custom fprint functions (FprintFunc) + +```go +blue := color.New(FgBlue).FprintfFunc() +blue(myWriter, "important notice: %s", stars) + +// Mix up with multiple attributes +success := color.New(color.Bold, color.FgGreen).FprintlnFunc() +success(myWriter, "Don't forget this...") +``` + +### Insert into noncolor strings (SprintFunc) + +```go +// Create SprintXxx functions to mix strings with other non-colorized strings: +yellow := color.New(color.FgYellow).SprintFunc() +red := color.New(color.FgRed).SprintFunc() +fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) + +info := color.New(color.FgWhite, color.BgGreen).SprintFunc() +fmt.Printf("This %s rocks!\n", info("package")) + +// Use helper functions +fmt.Println("This", color.RedString("warning"), "should be not neglected.") +fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") + +// Windows supported too! 
Just don't forget to change the output to color.Output +fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) +``` + +### Plug into existing code + +```go +// Use handy standard colors +color.Set(color.FgYellow) + +fmt.Println("Existing text will now be in yellow") +fmt.Printf("This one %s\n", "too") + +color.Unset() // Don't forget to unset + +// You can mix up parameters +color.Set(color.FgMagenta, color.Bold) +defer color.Unset() // Use it in your function + +fmt.Println("All text will now be bold magenta.") +``` + +### Disable/Enable color + +There might be a case where you want to explicitly disable/enable color output. the +`go-isatty` package will automatically disable color output for non-tty output streams +(for example if the output were piped directly to `less`) + +`Color` has support to disable/enable colors both globally and for single color +definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You +can easily disable the color output with: + +```go + +var flagNoColor = flag.Bool("no-color", false, "Disable color output") + +if *flagNoColor { + color.NoColor = true // disables colorized output +} +``` + +It also has support for single color definitions (local). You can +disable/enable color output on the fly: + +```go +c := color.New(color.FgCyan) +c.Println("Prints cyan text") + +c.DisableColor() +c.Println("This is printed without any color") + +c.EnableColor() +c.Println("This prints again cyan...") +``` + +## Todo + +* Save/Return previous values +* Evaluate fmt.Formatter interface + + +## Credits + + * [Fatih Arslan](https://github.com/fatih) + * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) + +## License + +The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details + diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go new file mode 100644 index 000000000..91c8e9f06 --- /dev/null +++ b/vendor/github.com/fatih/color/color.go @@ -0,0 +1,603 @@ +package color + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +var ( + // NoColor defines if the output is colorized or not. It's dynamically set to + // false or true based on the stdout's file descriptor referring to a terminal + // or not. This is a global option and affects all colors. For more control + // over each color block use the methods DisableColor() individually. + NoColor = os.Getenv("TERM") == "dumb" || + (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) + + // Output defines the standard output of the print functions. By default + // os.Stdout is used. + Output = colorable.NewColorableStdout() + + // Error defines a color supporting writer for os.Stderr. + Error = colorable.NewColorableStderr() + + // colorsCache is used to reduce the count of created Color objects and + // allows to reuse already created objects with required Attribute. + colorsCache = make(map[Attribute]*Color) + colorsCacheMu sync.Mutex // protects colorsCache +) + +// Color defines a custom color object which is defined by SGR parameters. 
+type Color struct { + params []Attribute + noColor *bool +} + +// Attribute defines a single SGR Code +type Attribute int + +const escape = "\x1b" + +// Base attributes +const ( + Reset Attribute = iota + Bold + Faint + Italic + Underline + BlinkSlow + BlinkRapid + ReverseVideo + Concealed + CrossedOut +) + +// Foreground text colors +const ( + FgBlack Attribute = iota + 30 + FgRed + FgGreen + FgYellow + FgBlue + FgMagenta + FgCyan + FgWhite +) + +// Foreground Hi-Intensity text colors +const ( + FgHiBlack Attribute = iota + 90 + FgHiRed + FgHiGreen + FgHiYellow + FgHiBlue + FgHiMagenta + FgHiCyan + FgHiWhite +) + +// Background text colors +const ( + BgBlack Attribute = iota + 40 + BgRed + BgGreen + BgYellow + BgBlue + BgMagenta + BgCyan + BgWhite +) + +// Background Hi-Intensity text colors +const ( + BgHiBlack Attribute = iota + 100 + BgHiRed + BgHiGreen + BgHiYellow + BgHiBlue + BgHiMagenta + BgHiCyan + BgHiWhite +) + +// New returns a newly created color object. +func New(value ...Attribute) *Color { + c := &Color{params: make([]Attribute, 0)} + c.Add(value...) + return c +} + +// Set sets the given parameters immediately. It will change the color of +// output with the given SGR parameters until color.Unset() is called. +func Set(p ...Attribute) *Color { + c := New(p...) + c.Set() + return c +} + +// Unset resets all escape attributes and clears the output. Usually should +// be called after Set(). +func Unset() { + if NoColor { + return + } + + fmt.Fprintf(Output, "%s[%dm", escape, Reset) +} + +// Set sets the SGR sequence. +func (c *Color) Set() *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(Output, c.format()) + return c +} + +func (c *Color) unset() { + if c.isNoColorSet() { + return + } + + Unset() +} + +func (c *Color) setWriter(w io.Writer) *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(w, c.format()) + return c +} + +func (c *Color) unsetWriter(w io.Writer) { + if c.isNoColorSet() { + return + } + + if NoColor { + return + } + + fmt.Fprintf(w, "%s[%dm", escape, Reset) +} + +// Add is used to chain SGR parameters. Use as many as parameters to combine +// and create custom color objects. Example: Add(color.FgRed, color.Underline). +func (c *Color) Add(value ...Attribute) *Color { + c.params = append(c.params, value...) + return c +} + +func (c *Color) prepend(value Attribute) { + c.params = append(c.params, 0) + copy(c.params[1:], c.params[0:]) + c.params[0] = value +} + +// Fprint formats using the default formats for its operands and writes to w. +// Spaces are added between operands when neither is a string. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprint(w, a...) +} + +// Print formats using the default formats for its operands and writes to +// standard output. Spaces are added between operands when neither is a +// string. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Print(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprint(Output, a...) +} + +// Fprintf formats according to a format specifier and writes to w. +// It returns the number of bytes written and any write error encountered. 
+// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintf(w, format, a...) +} + +// Printf formats according to a format specifier and writes to standard output. +// It returns the number of bytes written and any write error encountered. +// This is the standard fmt.Printf() method wrapped with the given color. +func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintf(Output, format, a...) +} + +// Fprintln formats using the default formats for its operands and writes to w. +// Spaces are always added between operands and a newline is appended. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintln(w, a...) +} + +// Println formats using the default formats for its operands and writes to +// standard output. Spaces are always added between operands and a newline is +// appended. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Println(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintln(Output, a...) +} + +// Sprint is just like Print, but returns a string instead of printing it. +func (c *Color) Sprint(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) +} + +// Sprintln is just like Println, but returns a string instead of printing it. +func (c *Color) Sprintln(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) +} + +// Sprintf is just like Printf, but returns a string instead of printing it. +func (c *Color) Sprintf(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) +} + +// FprintFunc returns a new function that prints the passed arguments as +// colorized with color.Fprint(). +func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprint(w, a...) + } +} + +// PrintFunc returns a new function that prints the passed arguments as +// colorized with color.Print(). +func (c *Color) PrintFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Print(a...) + } +} + +// FprintfFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintf(). +func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { + return func(w io.Writer, format string, a ...interface{}) { + c.Fprintf(w, format, a...) + } +} + +// PrintfFunc returns a new function that prints the passed arguments as +// colorized with color.Printf(). +func (c *Color) PrintfFunc() func(format string, a ...interface{}) { + return func(format string, a ...interface{}) { + c.Printf(format, a...) + } +} + +// FprintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintln(). +func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprintln(w, a...) + } +} + +// PrintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Println(). 
+func (c *Color) PrintlnFunc() func(a ...interface{}) {
+	return func(a ...interface{}) {
+		c.Println(a...)
+	}
+}
+
+// SprintFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprint(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output, for example:
+//
+//	put := New(FgYellow).SprintFunc()
+//	fmt.Fprintf(color.Output, "This is a %s", put("warning"))
+func (c *Color) SprintFunc() func(a ...interface{}) string {
+	return func(a ...interface{}) string {
+		return c.wrap(fmt.Sprint(a...))
+	}
+}
+
+// SprintfFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprintf(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output.
+func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
+	return func(format string, a ...interface{}) string {
+		return c.wrap(fmt.Sprintf(format, a...))
+	}
+}
+
+// SprintlnFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprintln(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output.
+func (c *Color) SprintlnFunc() func(a ...interface{}) string {
+	return func(a ...interface{}) string {
+		return c.wrap(fmt.Sprintln(a...))
+	}
+}
+
+// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
+// escape sequence. An example output might be: "1;36" -> bold cyan.
+func (c *Color) sequence() string {
+	format := make([]string, len(c.params))
+	for i, v := range c.params {
+		format[i] = strconv.Itoa(int(v))
+	}
+
+	return strings.Join(format, ";")
+}
+
+// wrap wraps the s string with the color's attributes. The string is ready to
+// be printed.
+func (c *Color) wrap(s string) string {
+	if c.isNoColorSet() {
+		return s
+	}
+
+	return c.format() + s + c.unformat()
+}
+
+func (c *Color) format() string {
+	return fmt.Sprintf("%s[%sm", escape, c.sequence())
+}
+
+func (c *Color) unformat() string {
+	return fmt.Sprintf("%s[%dm", escape, Reset)
+}
+
+// DisableColor disables the color output. Useful for keeping existing code
+// producing output while suppressing color. Can be used for flags like
+// "--no-color". To enable color again, use the EnableColor() method.
+func (c *Color) DisableColor() {
+	c.noColor = boolPtr(true)
+}
+
+// EnableColor enables the color output. Use it in conjunction with
+// DisableColor(). Otherwise this method has no side effects.
+func (c *Color) EnableColor() {
+	c.noColor = boolPtr(false)
+}
+
+func (c *Color) isNoColorSet() bool {
+	// check first whether the user explicitly set a preference
+	if c.noColor != nil {
+		return *c.noColor
+	}
+
+	// if not, return the global option, which is disabled by default
+	return NoColor
+}
+
+// Equals returns a boolean value indicating whether two colors are equal.
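+//
+// For example (an illustrative sketch, not from the upstream source), the
+// order of attributes does not matter:
+//
+//	a := color.New(color.FgRed, color.Bold)
+//	b := color.New(color.Bold, color.FgRed)
+//	equal := a.Equals(b) // true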
+func (c *Color) Equals(c2 *Color) bool { + if len(c.params) != len(c2.params) { + return false + } + + for _, attr := range c.params { + if !c2.attrExists(attr) { + return false + } + } + + return true +} + +func (c *Color) attrExists(a Attribute) bool { + for _, attr := range c.params { + if attr == a { + return true + } + } + + return false +} + +func boolPtr(v bool) *bool { + return &v +} + +func getCachedColor(p Attribute) *Color { + colorsCacheMu.Lock() + defer colorsCacheMu.Unlock() + + c, ok := colorsCache[p] + if !ok { + c = New(p) + colorsCache[p] = c + } + + return c +} + +func colorPrint(format string, p Attribute, a ...interface{}) { + c := getCachedColor(p) + + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + + if len(a) == 0 { + c.Print(format) + } else { + c.Printf(format, a...) + } +} + +func colorString(format string, p Attribute, a ...interface{}) string { + c := getCachedColor(p) + + if len(a) == 0 { + return c.SprintFunc()(format) + } + + return c.SprintfFunc()(format, a...) +} + +// Black is a convenient helper function to print with black foreground. A +// newline is appended to format by default. +func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } + +// Red is a convenient helper function to print with red foreground. A +// newline is appended to format by default. +func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } + +// Green is a convenient helper function to print with green foreground. A +// newline is appended to format by default. +func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } + +// Yellow is a convenient helper function to print with yellow foreground. +// A newline is appended to format by default. +func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } + +// Blue is a convenient helper function to print with blue foreground. A +// newline is appended to format by default. +func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } + +// Magenta is a convenient helper function to print with magenta foreground. +// A newline is appended to format by default. +func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } + +// Cyan is a convenient helper function to print with cyan foreground. A +// newline is appended to format by default. +func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } + +// White is a convenient helper function to print with white foreground. A +// newline is appended to format by default. +func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } + +// BlackString is a convenient helper function to return a string with black +// foreground. +func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } + +// RedString is a convenient helper function to return a string with red +// foreground. +func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } + +// GreenString is a convenient helper function to return a string with green +// foreground. +func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } + +// YellowString is a convenient helper function to return a string with yellow +// foreground. +func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) 
} + +// BlueString is a convenient helper function to return a string with blue +// foreground. +func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } + +// MagentaString is a convenient helper function to return a string with magenta +// foreground. +func MagentaString(format string, a ...interface{}) string { + return colorString(format, FgMagenta, a...) +} + +// CyanString is a convenient helper function to return a string with cyan +// foreground. +func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } + +// WhiteString is a convenient helper function to return a string with white +// foreground. +func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } + +// HiBlack is a convenient helper function to print with hi-intensity black foreground. A +// newline is appended to format by default. +func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } + +// HiRed is a convenient helper function to print with hi-intensity red foreground. A +// newline is appended to format by default. +func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } + +// HiGreen is a convenient helper function to print with hi-intensity green foreground. A +// newline is appended to format by default. +func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } + +// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. +// A newline is appended to format by default. +func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } + +// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A +// newline is appended to format by default. +func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) } + +// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. +// A newline is appended to format by default. +func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } + +// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A +// newline is appended to format by default. +func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } + +// HiWhite is a convenient helper function to print with hi-intensity white foreground. A +// newline is appended to format by default. +func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } + +// HiBlackString is a convenient helper function to return a string with hi-intensity black +// foreground. +func HiBlackString(format string, a ...interface{}) string { + return colorString(format, FgHiBlack, a...) +} + +// HiRedString is a convenient helper function to return a string with hi-intensity red +// foreground. +func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } + +// HiGreenString is a convenient helper function to return a string with hi-intensity green +// foreground. +func HiGreenString(format string, a ...interface{}) string { + return colorString(format, FgHiGreen, a...) +} + +// HiYellowString is a convenient helper function to return a string with hi-intensity yellow +// foreground. +func HiYellowString(format string, a ...interface{}) string { + return colorString(format, FgHiYellow, a...) 
+} + +// HiBlueString is a convenient helper function to return a string with hi-intensity blue +// foreground. +func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) } + +// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta +// foreground. +func HiMagentaString(format string, a ...interface{}) string { + return colorString(format, FgHiMagenta, a...) +} + +// HiCyanString is a convenient helper function to return a string with hi-intensity cyan +// foreground. +func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) } + +// HiWhiteString is a convenient helper function to return a string with hi-intensity white +// foreground. +func HiWhiteString(format string, a ...interface{}) string { + return colorString(format, FgHiWhite, a...) +} diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go new file mode 100644 index 000000000..cf1e96500 --- /dev/null +++ b/vendor/github.com/fatih/color/doc.go @@ -0,0 +1,133 @@ +/* +Package color is an ANSI color package to output colorized or SGR defined +output to the standard output. The API can be used in several way, pick one +that suits you. + +Use simple and default helper functions with predefined foreground colors: + + color.Cyan("Prints text in cyan.") + + // a newline will be appended automatically + color.Blue("Prints %s in blue.", "text") + + // More default foreground colors.. + color.Red("We have red") + color.Yellow("Yellow color too!") + color.Magenta("And many others ..") + + // Hi-intensity colors + color.HiGreen("Bright green color.") + color.HiBlack("Bright black means gray..") + color.HiWhite("Shiny white color!") + +However there are times where custom color mixes are required. Below are some +examples to create custom color objects and use the print functions of each +separate color object. + + // Create a new color object + c := color.New(color.FgCyan).Add(color.Underline) + c.Println("Prints cyan text with an underline.") + + // Or just add them to New() + d := color.New(color.FgCyan, color.Bold) + d.Printf("This prints bold cyan %s\n", "too!.") + + + // Mix up foreground and background colors, create new mixes! 
+	red := color.New(color.FgRed)
+
+	boldRed := red.Add(color.Bold)
+	boldRed.Println("This will print text in bold red.")
+
+	whiteBackground := red.Add(color.BgWhite)
+	whiteBackground.Println("Red text with White background.")
+
+	// Use your own io.Writer output
+	color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
+
+	blue := color.New(color.FgBlue)
+	blue.Fprint(myWriter, "This will print text in blue.")
+
+You can create PrintXxx functions to simplify even more:
+
+	// Create a custom print function for convenience
+	red := color.New(color.FgRed).PrintfFunc()
+	red("warning")
+	red("error: %s", err)
+
+	// Mix up multiple attributes
+	notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+	notice("don't forget this...")
+
+You can also use the FprintXxx functions to pass your own io.Writer:
+
+	blue := color.New(color.FgBlue).FprintfFunc()
+	blue(myWriter, "important notice: %s", stars)
+
+	// Mix up with multiple attributes
+	success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
+	success(myWriter, "don't forget this...")
+
+Or create SprintXxx functions to mix strings with other non-colorized strings:
+
+	yellow := color.New(color.FgYellow).SprintFunc()
+	red := color.New(color.FgRed).SprintFunc()
+
+	fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+	info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
+	fmt.Printf("this %s rocks!\n", info("package"))
+
+Windows support is enabled by default. All Print functions work as intended.
+However, for the color.SprintXXX functions, users should use fmt.FprintXXX and
+set the output to color.Output:
+
+	fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+
+	info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
+	fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
+
+Using it with existing code is possible. Just use the Set() method to set the
+standard output to the given parameters; that way a rewrite of existing code
+is not required.
+
+	// Use handy standard colors.
+	color.Set(color.FgYellow)
+
+	fmt.Println("Existing text will be now in Yellow")
+	fmt.Printf("This one %s\n", "too")
+
+	color.Unset() // don't forget to unset
+
+	// You can mix up parameters
+	color.Set(color.FgMagenta, color.Bold)
+	defer color.Unset() // use it in your function
+
+	fmt.Println("All text will be now bold magenta.")
+
+There might be a case where you want to disable color output (for example to
+pipe the standard output of your app to somewhere else). `Color` supports
+disabling colors both globally and for a single color definition. For example,
+suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
+the color output with:
+
+	var flagNoColor = flag.Bool("no-color", false, "Disable color output")
+
+	if *flagNoColor {
+		color.NoColor = true // disables colorized output
+	}
+
+It also has support for single color definitions (local).
You can +disable/enable color output on the fly: + + c := color.New(color.FgCyan) + c.Println("Prints cyan text") + + c.DisableColor() + c.Println("This is printed without any color") + + c.EnableColor() + c.Println("This prints again cyan...") +*/ +package color diff --git a/vendor/github.com/fatih/color/go.mod b/vendor/github.com/fatih/color/go.mod new file mode 100644 index 000000000..78872815e --- /dev/null +++ b/vendor/github.com/fatih/color/go.mod @@ -0,0 +1,8 @@ +module github.com/fatih/color + +go 1.13 + +require ( + github.com/mattn/go-colorable v0.1.8 + github.com/mattn/go-isatty v0.0.12 +) diff --git a/vendor/github.com/fatih/color/go.sum b/vendor/github.com/fatih/color/go.sum new file mode 100644 index 000000000..54f7c46e8 --- /dev/null +++ b/vendor/github.com/fatih/color/go.sum @@ -0,0 +1,7 @@ +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/influxdata/influxdb/LICENSE b/vendor/github.com/influxdata/influxdb/LICENSE new file mode 100644 index 000000000..cfd3bfe67 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013-2018 InfluxData Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client.go b/vendor/github.com/influxdata/influxdb/client/v2/client.go new file mode 100644 index 000000000..cc97baf5f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/client.go @@ -0,0 +1,747 @@ +// Package client (v2) is the current official Go client for InfluxDB. 
+package client // import "github.com/influxdata/influxdb/client/v2" + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "mime" + "net" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "time" + + "github.com/influxdata/influxdb/models" +) + +// HTTPConfig is the config data needed to create an HTTP Client. +type HTTPConfig struct { + // Addr should be of the form "http://host:port" + // or "http://[ipv6-host%zone]:port". + Addr string + + // Username is the influxdb username, optional. + Username string + + // Password is the influxdb password, optional. + Password string + + // UserAgent is the http User Agent, defaults to "InfluxDBClient". + UserAgent string + + // Timeout for influxdb writes, defaults to no timeout. + Timeout time.Duration + + // InsecureSkipVerify gets passed to the http client, if true, it will + // skip https certificate verification. Defaults to false. + InsecureSkipVerify bool + + // TLSConfig allows the user to set their own TLS config for the HTTP + // Client. If set, this option overrides InsecureSkipVerify. + TLSConfig *tls.Config + + // Proxy configures the Proxy function on the HTTP client. + Proxy func(req *http.Request) (*url.URL, error) + + // DialContext specifies the dial function for creating unencrypted TCP connections. + // If DialContext is nil then the transport dials using package net. + DialContext func(ctx context.Context, network, addr string) (net.Conn, error) +} + +// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct. +type BatchPointsConfig struct { + // Precision is the write precision of the points, defaults to "ns". + Precision string + + // Database is the database to write points to. + Database string + + // RetentionPolicy is the retention policy of the points. + RetentionPolicy string + + // Write consistency is the number of servers required to confirm write. + WriteConsistency string +} + +// Client is a client interface for writing & querying the database. +type Client interface { + // Ping checks that status of cluster, and will always return 0 time and no + // error for UDP clients. + Ping(timeout time.Duration) (time.Duration, string, error) + + // Write takes a BatchPoints object and writes all Points to InfluxDB. + Write(bp BatchPoints) error + + // Query makes an InfluxDB Query on the database. This will fail if using + // the UDP client. + Query(q Query) (*Response, error) + + // QueryCtx makes an InfluxDB Query on the database. This will fail if using + // the UDP client. + QueryCtx(ctx context.Context, q Query) (*Response, error) + + // QueryAsChunk makes an InfluxDB Query on the database. This will fail if using + // the UDP client. + QueryAsChunk(q Query) (*ChunkedResponse, error) + + // Close releases any resources a Client may be using. + Close() error +} + +// NewHTTPClient returns a new Client from the provided config. +// Client is safe for concurrent use by multiple goroutines. 
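+//
+// A minimal connection sketch (illustrative only; the address is an
+// assumption, not part of the upstream file):
+//
+//	c, err := client.NewHTTPClient(client.HTTPConfig{
+//		Addr: "http://localhost:8086",
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	defer c.Close()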
+func NewHTTPClient(conf HTTPConfig) (Client, error) { + if conf.UserAgent == "" { + conf.UserAgent = "InfluxDBClient" + } + + u, err := url.Parse(conf.Addr) + if err != nil { + return nil, err + } else if u.Scheme != "http" && u.Scheme != "https" { + m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+ + " must start with http:// or https://", u.Scheme) + return nil, errors.New(m) + } + + tr := &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: conf.InsecureSkipVerify, + }, + Proxy: conf.Proxy, + DialContext: conf.DialContext, + } + if conf.TLSConfig != nil { + tr.TLSClientConfig = conf.TLSConfig + // Make sure to preserve the InsecureSkipVerify setting from the config. + tr.TLSClientConfig.InsecureSkipVerify = conf.InsecureSkipVerify + } + return &client{ + url: *u, + username: conf.Username, + password: conf.Password, + useragent: conf.UserAgent, + httpClient: &http.Client{ + Timeout: conf.Timeout, + Transport: tr, + }, + transport: tr, + }, nil +} + +// Ping will check to see if the server is up with an optional timeout on waiting for leader. +// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. +func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) { + now := time.Now() + + u := c.url + u.Path = path.Join(u.Path, "ping") + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return 0, "", err + } + + req.Header.Set("User-Agent", c.useragent) + + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + if timeout > 0 { + params := req.URL.Query() + params.Set("wait_for_leader", fmt.Sprintf("%.0fs", timeout.Seconds())) + req.URL.RawQuery = params.Encode() + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return 0, "", err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return 0, "", err + } + + if resp.StatusCode != http.StatusNoContent { + var err = errors.New(string(body)) + return 0, "", err + } + + version := resp.Header.Get("X-Influxdb-Version") + return time.Since(now), version, nil +} + +// Close releases the client's resources. +func (c *client) Close() error { + c.transport.CloseIdleConnections() + return nil +} + +// client is safe for concurrent use as the fields are all read-only +// once the client is instantiated. +type client struct { + // N.B - if url.UserInfo is accessed in future modifications to the + // methods on client, you will need to synchronize access to url. + url url.URL + username string + password string + useragent string + httpClient *http.Client + transport *http.Transport +} + +// BatchPoints is an interface into a batched grouping of points to write into +// InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate +// batch for each goroutine. +type BatchPoints interface { + // AddPoint adds the given point to the Batch of points. + AddPoint(p *Point) + // AddPoints adds the given points to the Batch of points. + AddPoints(ps []*Point) + // Points lists the points in the Batch. + Points() []*Point + + // Precision returns the currently set precision of this Batch. + Precision() string + // SetPrecision sets the precision of this batch. + SetPrecision(s string) error + + // Database returns the currently set database of this Batch. + Database() string + // SetDatabase sets the database of this Batch. + SetDatabase(s string) + + // WriteConsistency returns the currently set write consistency of this Batch. 
+ WriteConsistency() string + // SetWriteConsistency sets the write consistency of this Batch. + SetWriteConsistency(s string) + + // RetentionPolicy returns the currently set retention policy of this Batch. + RetentionPolicy() string + // SetRetentionPolicy sets the retention policy of this Batch. + SetRetentionPolicy(s string) +} + +// NewBatchPoints returns a BatchPoints interface based on the given config. +func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) { + if conf.Precision == "" { + conf.Precision = "ns" + } + if _, err := time.ParseDuration("1" + conf.Precision); err != nil { + return nil, err + } + bp := &batchpoints{ + database: conf.Database, + precision: conf.Precision, + retentionPolicy: conf.RetentionPolicy, + writeConsistency: conf.WriteConsistency, + } + return bp, nil +} + +type batchpoints struct { + points []*Point + database string + precision string + retentionPolicy string + writeConsistency string +} + +func (bp *batchpoints) AddPoint(p *Point) { + bp.points = append(bp.points, p) +} + +func (bp *batchpoints) AddPoints(ps []*Point) { + bp.points = append(bp.points, ps...) +} + +func (bp *batchpoints) Points() []*Point { + return bp.points +} + +func (bp *batchpoints) Precision() string { + return bp.precision +} + +func (bp *batchpoints) Database() string { + return bp.database +} + +func (bp *batchpoints) WriteConsistency() string { + return bp.writeConsistency +} + +func (bp *batchpoints) RetentionPolicy() string { + return bp.retentionPolicy +} + +func (bp *batchpoints) SetPrecision(p string) error { + if _, err := time.ParseDuration("1" + p); err != nil { + return err + } + bp.precision = p + return nil +} + +func (bp *batchpoints) SetDatabase(db string) { + bp.database = db +} + +func (bp *batchpoints) SetWriteConsistency(wc string) { + bp.writeConsistency = wc +} + +func (bp *batchpoints) SetRetentionPolicy(rp string) { + bp.retentionPolicy = rp +} + +// Point represents a single data point. +type Point struct { + pt models.Point +} + +// NewPoint returns a point with the given timestamp. If a timestamp is not +// given, then data is sent to the database without a timestamp, in which case +// the server will assign local time upon reception. NOTE: it is recommended to +// send data with a timestamp. +func NewPoint( + name string, + tags map[string]string, + fields map[string]interface{}, + t ...time.Time, +) (*Point, error) { + var T time.Time + if len(t) > 0 { + T = t[0] + } + + pt, err := models.NewPoint(name, models.NewTags(tags), fields, T) + if err != nil { + return nil, err + } + return &Point{ + pt: pt, + }, nil +} + +// String returns a line-protocol string of the Point. +func (p *Point) String() string { + return p.pt.String() +} + +// PrecisionString returns a line-protocol string of the Point, +// with the timestamp formatted for the given precision. +func (p *Point) PrecisionString(precision string) string { + return p.pt.PrecisionString(precision) +} + +// Name returns the measurement name of the point. +func (p *Point) Name() string { + return string(p.pt.Name()) +} + +// Tags returns the tags associated with the point. +func (p *Point) Tags() map[string]string { + return p.pt.Tags().Map() +} + +// Time return the timestamp for the point. +func (p *Point) Time() time.Time { + return p.pt.Time() +} + +// UnixNano returns timestamp of the point in nanoseconds since Unix epoch. +func (p *Point) UnixNano() int64 { + return p.pt.UnixNano() +} + +// Fields returns the fields for the point. 
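+//
+// For example (an illustrative sketch; the measurement, tags, and values
+// are assumptions):
+//
+//	pt, _ := NewPoint("cpu",
+//		map[string]string{"host": "server01"},
+//		map[string]interface{}{"value": 0.64},
+//		time.Now())
+//	fields, _ := pt.Fields() // fields["value"] == 0.64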
+func (p *Point) Fields() (map[string]interface{}, error) { + return p.pt.Fields() +} + +// NewPointFrom returns a point from the provided models.Point. +func NewPointFrom(pt models.Point) *Point { + return &Point{pt: pt} +} + +func (c *client) Write(bp BatchPoints) error { + var b bytes.Buffer + + for _, p := range bp.Points() { + if p == nil { + continue + } + if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil { + return err + } + + if err := b.WriteByte('\n'); err != nil { + return err + } + } + + u := c.url + u.Path = path.Join(u.Path, "write") + + req, err := http.NewRequest("POST", u.String(), &b) + if err != nil { + return err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.useragent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + params := req.URL.Query() + params.Set("db", bp.Database()) + params.Set("rp", bp.RetentionPolicy()) + params.Set("precision", bp.Precision()) + params.Set("consistency", bp.WriteConsistency()) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + var err = errors.New(string(body)) + return err + } + + return nil +} + +// Query defines a query to send to the server. +type Query struct { + Command string + Database string + RetentionPolicy string + Precision string + Chunked bool + ChunkSize int + Parameters map[string]interface{} +} + +// Params is a type alias to the query parameters. +type Params map[string]interface{} + +// NewQuery returns a query object. +// The database and precision arguments can be empty strings if they are not needed for the query. +func NewQuery(command, database, precision string) Query { + return Query{ + Command: command, + Database: database, + Precision: precision, + Parameters: make(map[string]interface{}), + } +} + +// NewQueryWithRP returns a query object. +// The database, retention policy, and precision arguments can be empty strings if they are not needed +// for the query. Setting the retention policy only works on InfluxDB versions 1.6 or greater. +func NewQueryWithRP(command, database, retentionPolicy, precision string) Query { + return Query{ + Command: command, + Database: database, + RetentionPolicy: retentionPolicy, + Precision: precision, + Parameters: make(map[string]interface{}), + } +} + +// NewQueryWithParameters returns a query object. +// The database and precision arguments can be empty strings if they are not needed for the query. +// parameters is a map of the parameter names used in the command to their values. +func NewQueryWithParameters(command, database, precision string, parameters map[string]interface{}) Query { + return Query{ + Command: command, + Database: database, + Precision: precision, + Parameters: parameters, + } +} + +// Response represents a list of statement results. +type Response struct { + Results []Result + Err string `json:"error,omitempty"` +} + +// Error returns the first error from any statement. +// It returns nil if no errors occurred on any statements. +func (r *Response) Error() error { + if r.Err != "" { + return errors.New(r.Err) + } + for _, result := range r.Results { + if result.Err != "" { + return errors.New(result.Err) + } + } + return nil +} + +// Message represents a user message. 
+type Message struct { + Level string + Text string +} + +// Result represents a resultset returned from a single statement. +type Result struct { + Series []models.Row + Messages []*Message + Err string `json:"error,omitempty"` +} + +// Query sends a command to the server and returns the Response. +func (c *client) Query(q Query) (*Response, error) { + return c.QueryCtx(nil, q) +} + +// QueryCtx sends a command to the server and returns the Response. +func (c *client) QueryCtx(ctx context.Context, q Query) (*Response, error) { + req, err := c.createDefaultRequest(ctx, q) + if err != nil { + return nil, err + } + params := req.URL.Query() + if q.Chunked { + params.Set("chunked", "true") + if q.ChunkSize > 0 { + params.Set("chunk_size", strconv.Itoa(q.ChunkSize)) + } + req.URL.RawQuery = params.Encode() + } + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := checkResponse(resp); err != nil { + return nil, err + } + + var response Response + if q.Chunked { + cr := NewChunkedResponse(resp.Body) + for { + r, err := cr.NextResponse() + if err != nil { + if err == io.EOF { + break + } + // If we got an error while decoding the response, send that back. + return nil, err + } + + if r == nil { + break + } + + response.Results = append(response.Results, r.Results...) + if r.Err != "" { + response.Err = r.Err + break + } + } + } else { + dec := json.NewDecoder(resp.Body) + dec.UseNumber() + decErr := dec.Decode(&response) + + // ignore this error if we got an invalid status code + if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK { + decErr = nil + } + // If we got a valid decode error, send that back + if decErr != nil { + return nil, fmt.Errorf("unable to decode json: received status code %d err: %s", resp.StatusCode, decErr) + } + } + + // If we don't have an error in our json response, and didn't get statusOK + // then send back an error + if resp.StatusCode != http.StatusOK && response.Error() == nil { + return &response, fmt.Errorf("received status code %d from server", resp.StatusCode) + } + return &response, nil +} + +// QueryAsChunk sends a command to the server and returns the Response. +func (c *client) QueryAsChunk(q Query) (*ChunkedResponse, error) { + req, err := c.createDefaultRequest(nil, q) + if err != nil { + return nil, err + } + params := req.URL.Query() + params.Set("chunked", "true") + if q.ChunkSize > 0 { + params.Set("chunk_size", strconv.Itoa(q.ChunkSize)) + } + req.URL.RawQuery = params.Encode() + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + + if err := checkResponse(resp); err != nil { + return nil, err + } + return NewChunkedResponse(resp.Body), nil +} + +func checkResponse(resp *http.Response) error { + // If we lack a X-Influxdb-Version header, then we didn't get a response from influxdb + // but instead some other service. If the error code is also a 500+ code, then some + // downstream loadbalancer/proxy/etc had an issue and we should report that. 
+ if resp.Header.Get("X-Influxdb-Version") == "" && resp.StatusCode >= http.StatusInternalServerError { + body, err := ioutil.ReadAll(resp.Body) + if err != nil || len(body) == 0 { + return fmt.Errorf("received status code %d from downstream server", resp.StatusCode) + } + + return fmt.Errorf("received status code %d from downstream server, with response body: %q", resp.StatusCode, body) + } + + // If we get an unexpected content type, then it is also not from influx direct and therefore + // we want to know what we received and what status code was returned for debugging purposes. + if cType, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type")); cType != "application/json" { + // Read up to 1kb of the body to help identify downstream errors and limit the impact of things + // like downstream serving a large file + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024)) + if err != nil || len(body) == 0 { + return fmt.Errorf("expected json response, got empty body, with status: %v", resp.StatusCode) + } + + return fmt.Errorf("expected json response, got %q, with status: %v and response body: %q", cType, resp.StatusCode, body) + } + return nil +} + +func (c *client) createDefaultRequest(ctx context.Context, q Query) (*http.Request, error) { + u := c.url + u.Path = path.Join(u.Path, "query") + + jsonParameters, err := json.Marshal(q.Parameters) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return nil, err + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.useragent) + + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + params := req.URL.Query() + params.Set("q", q.Command) + params.Set("db", q.Database) + if q.RetentionPolicy != "" { + params.Set("rp", q.RetentionPolicy) + } + params.Set("params", string(jsonParameters)) + + if q.Precision != "" { + params.Set("epoch", q.Precision) + } + req.URL.RawQuery = params.Encode() + + return req, nil + +} + +// duplexReader reads responses and writes it to another writer while +// satisfying the reader interface. +type duplexReader struct { + r io.ReadCloser + w io.Writer +} + +func (r *duplexReader) Read(p []byte) (n int, err error) { + n, err = r.r.Read(p) + if err == nil { + r.w.Write(p[:n]) + } + return n, err +} + +// Close closes the response. +func (r *duplexReader) Close() error { + return r.r.Close() +} + +// ChunkedResponse represents a response from the server that +// uses chunking to stream the output. +type ChunkedResponse struct { + dec *json.Decoder + duplex *duplexReader + buf bytes.Buffer +} + +// NewChunkedResponse reads a stream and produces responses from the stream. +func NewChunkedResponse(r io.Reader) *ChunkedResponse { + rc, ok := r.(io.ReadCloser) + if !ok { + rc = ioutil.NopCloser(r) + } + resp := &ChunkedResponse{} + resp.duplex = &duplexReader{r: rc, w: &resp.buf} + resp.dec = json.NewDecoder(resp.duplex) + resp.dec.UseNumber() + return resp +} + +// NextResponse reads the next line of the stream and returns a response. +func (r *ChunkedResponse) NextResponse() (*Response, error) { + var response Response + if err := r.dec.Decode(&response); err != nil { + if err == io.EOF { + return nil, err + } + // A decoding error happened. This probably means the server crashed + // and sent a last-ditch error message to us. Ensure we have read the + // entirety of the connection to get any remaining error text. 
+ io.Copy(ioutil.Discard, r.duplex) + return nil, errors.New(strings.TrimSpace(r.buf.String())) + } + + r.buf.Reset() + return &response, nil +} + +// Close closes the response. +func (r *ChunkedResponse) Close() error { + return r.duplex.Close() +} diff --git a/vendor/github.com/influxdata/influxdb/client/v2/params.go b/vendor/github.com/influxdata/influxdb/client/v2/params.go new file mode 100644 index 000000000..5616bfbd7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/params.go @@ -0,0 +1,73 @@ +package client + +import ( + "encoding/json" + "time" +) + +type ( + // Identifier is an identifier value. + Identifier string + + // StringValue is a string literal. + StringValue string + + // RegexValue is a regexp literal. + RegexValue string + + // NumberValue is a number literal. + NumberValue float64 + + // IntegerValue is an integer literal. + IntegerValue int64 + + // BooleanValue is a boolean literal. + BooleanValue bool + + // TimeValue is a time literal. + TimeValue time.Time + + // DurationValue is a duration literal. + DurationValue time.Duration +) + +func (v Identifier) MarshalJSON() ([]byte, error) { + m := map[string]string{"identifier": string(v)} + return json.Marshal(m) +} + +func (v StringValue) MarshalJSON() ([]byte, error) { + m := map[string]string{"string": string(v)} + return json.Marshal(m) +} + +func (v RegexValue) MarshalJSON() ([]byte, error) { + m := map[string]string{"regex": string(v)} + return json.Marshal(m) +} + +func (v NumberValue) MarshalJSON() ([]byte, error) { + m := map[string]float64{"number": float64(v)} + return json.Marshal(m) +} + +func (v IntegerValue) MarshalJSON() ([]byte, error) { + m := map[string]int64{"integer": int64(v)} + return json.Marshal(m) +} + +func (v BooleanValue) MarshalJSON() ([]byte, error) { + m := map[string]bool{"boolean": bool(v)} + return json.Marshal(m) +} + +func (v TimeValue) MarshalJSON() ([]byte, error) { + t := time.Time(v) + m := map[string]string{"string": t.Format(time.RFC3339Nano)} + return json.Marshal(m) +} + +func (v DurationValue) MarshalJSON() ([]byte, error) { + m := map[string]int64{"duration": int64(v)} + return json.Marshal(m) +} diff --git a/vendor/github.com/influxdata/influxdb/client/v2/udp.go b/vendor/github.com/influxdata/influxdb/client/v2/udp.go new file mode 100644 index 000000000..1256011dd --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/udp.go @@ -0,0 +1,121 @@ +package client + +import ( + "context" + "fmt" + "io" + "net" + "time" +) + +const ( + // UDPPayloadSize is a reasonable default payload size for UDP packets that + // could be travelling over the internet. + UDPPayloadSize = 512 +) + +// UDPConfig is the config data needed to create a UDP Client. +type UDPConfig struct { + // Addr should be of the form "host:port" + // or "[ipv6-host%zone]:port". + Addr string + + // PayloadSize is the maximum size of a UDP client message, optional + // Tune this based on your network. Defaults to UDPPayloadSize. + PayloadSize int +} + +// NewUDPClient returns a client interface for writing to an InfluxDB UDP +// service from the given config. 
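+//
+// A minimal sketch (illustrative only; the address is an assumption):
+//
+//	c, err := client.NewUDPClient(client.UDPConfig{Addr: "localhost:8089"})
+//	if err != nil {
+//		// handle error
+//	}
+//	defer c.Close()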
+func NewUDPClient(conf UDPConfig) (Client, error) { + var udpAddr *net.UDPAddr + udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr) + if err != nil { + return nil, err + } + + conn, err := net.DialUDP("udp", nil, udpAddr) + if err != nil { + return nil, err + } + + payloadSize := conf.PayloadSize + if payloadSize == 0 { + payloadSize = UDPPayloadSize + } + + return &udpclient{ + conn: conn, + payloadSize: payloadSize, + }, nil +} + +// Close releases the udpclient's resources. +func (uc *udpclient) Close() error { + return uc.conn.Close() +} + +type udpclient struct { + conn io.WriteCloser + payloadSize int +} + +func (uc *udpclient) Write(bp BatchPoints) error { + var b = make([]byte, 0, uc.payloadSize) // initial buffer size, it will grow as needed + var d, _ = time.ParseDuration("1" + bp.Precision()) + + var delayedError error + + var checkBuffer = func(n int) { + if len(b) > 0 && len(b)+n > uc.payloadSize { + if _, err := uc.conn.Write(b); err != nil { + delayedError = err + } + b = b[:0] + } + } + + for _, p := range bp.Points() { + p.pt.Round(d) + pointSize := p.pt.StringSize() + 1 // include newline in size + //point := p.pt.RoundedString(d) + "\n" + + checkBuffer(pointSize) + + if p.Time().IsZero() || pointSize <= uc.payloadSize { + b = p.pt.AppendString(b) + b = append(b, '\n') + continue + } + + points := p.pt.Split(uc.payloadSize - 1) // account for newline character + for _, sp := range points { + checkBuffer(sp.StringSize() + 1) + b = sp.AppendString(b) + b = append(b, '\n') + } + } + + if len(b) > 0 { + if _, err := uc.conn.Write(b); err != nil { + return err + } + } + return delayedError +} + +func (uc *udpclient) Query(q Query) (*Response, error) { + return nil, fmt.Errorf("querying via UDP is not supported") +} + +func (uc *udpclient) QueryCtx(ctx context.Context, q Query) (*Response, error) { + return nil, fmt.Errorf("querying via UDP is not supported") +} + +func (uc *udpclient) QueryAsChunk(q Query) (*ChunkedResponse, error) { + return nil, fmt.Errorf("querying via UDP is not supported") +} + +func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) { + return 0, "", nil +} diff --git a/vendor/github.com/influxdata/influxdb/models/consistency.go b/vendor/github.com/influxdata/influxdb/models/consistency.go new file mode 100644 index 000000000..2a3269bca --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/consistency.go @@ -0,0 +1,48 @@ +package models + +import ( + "errors" + "strings" +) + +// ConsistencyLevel represent a required replication criteria before a write can +// be returned as successful. +// +// The consistency level is handled in open-source InfluxDB but only applicable to clusters. +type ConsistencyLevel int + +const ( + // ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet. + ConsistencyLevelAny ConsistencyLevel = iota + + // ConsistencyLevelOne requires at least one data node acknowledged a write. + ConsistencyLevelOne + + // ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write. + ConsistencyLevelQuorum + + // ConsistencyLevelAll requires all data nodes to acknowledge a write. + ConsistencyLevelAll +) + +var ( + // ErrInvalidConsistencyLevel is returned when parsing the string version + // of a consistency level. + ErrInvalidConsistencyLevel = errors.New("invalid consistency level") +) + +// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const. 
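+//
+// For example (an illustrative sketch):
+//
+//	level, err := models.ParseConsistencyLevel("quorum")
+//	// level == models.ConsistencyLevelQuorum, err == nil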
+func ParseConsistencyLevel(level string) (ConsistencyLevel, error) { + switch strings.ToLower(level) { + case "any": + return ConsistencyLevelAny, nil + case "one": + return ConsistencyLevelOne, nil + case "quorum": + return ConsistencyLevelQuorum, nil + case "all": + return ConsistencyLevelAll, nil + default: + return 0, ErrInvalidConsistencyLevel + } +} diff --git a/vendor/github.com/influxdata/influxdb/models/fieldtype_string.go b/vendor/github.com/influxdata/influxdb/models/fieldtype_string.go new file mode 100644 index 000000000..3d181aa99 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/fieldtype_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=FieldType"; DO NOT EDIT. + +package models + +import "strconv" + +const _FieldType_name = "IntegerFloatBooleanStringEmptyUnsigned" + +var _FieldType_index = [...]uint8{0, 7, 12, 19, 25, 30, 38} + +func (i FieldType) String() string { + if i < 0 || i >= FieldType(len(_FieldType_index)-1) { + return "FieldType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _FieldType_name[_FieldType_index[i]:_FieldType_index[i+1]] +} diff --git a/vendor/github.com/influxdata/influxdb/models/gen.go b/vendor/github.com/influxdata/influxdb/models/gen.go new file mode 100644 index 000000000..0aaa43f20 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/gen.go @@ -0,0 +1,3 @@ +package models + +//go:generate stringer -type=FieldType diff --git a/vendor/github.com/influxdata/influxdb/models/inline_fnv.go b/vendor/github.com/influxdata/influxdb/models/inline_fnv.go new file mode 100644 index 000000000..eec1ae8b0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/inline_fnv.go @@ -0,0 +1,32 @@ +package models // import "github.com/influxdata/influxdb/models" + +// from stdlib hash/fnv/fnv.go +const ( + prime64 = 1099511628211 + offset64 = 14695981039346656037 +) + +// InlineFNV64a is an alloc-free port of the standard library's fnv64a. +// See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function. +type InlineFNV64a uint64 + +// NewInlineFNV64a returns a new instance of InlineFNV64a. +func NewInlineFNV64a() InlineFNV64a { + return offset64 +} + +// Write adds data to the running hash. +func (s *InlineFNV64a) Write(data []byte) (int, error) { + hash := uint64(*s) + for _, c := range data { + hash ^= uint64(c) + hash *= prime64 + } + *s = InlineFNV64a(hash) + return len(data), nil +} + +// Sum64 returns the uint64 of the current resulting hash. +func (s *InlineFNV64a) Sum64() uint64 { + return uint64(*s) +} diff --git a/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go new file mode 100644 index 000000000..8db483738 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go @@ -0,0 +1,44 @@ +package models // import "github.com/influxdata/influxdb/models" + +import ( + "reflect" + "strconv" + "unsafe" +) + +// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt. +func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) { + s := unsafeBytesToString(b) + return strconv.ParseInt(s, base, bitSize) +} + +// parseUintBytes is a zero-alloc wrapper around strconv.ParseUint. +func parseUintBytes(b []byte, base int, bitSize int) (i uint64, err error) { + s := unsafeBytesToString(b) + return strconv.ParseUint(s, base, bitSize) +} + +// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat. 
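+//
+// For example (an illustrative sketch):
+//
+//	f, err := parseFloatBytes([]byte("0.64"), 64)
+//	// f == 0.64, err == nil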
+func parseFloatBytes(b []byte, bitSize int) (float64, error) {
+	s := unsafeBytesToString(b)
+	return strconv.ParseFloat(s, bitSize)
+}
+
+// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
+func parseBoolBytes(b []byte) (bool, error) {
+	return strconv.ParseBool(unsafeBytesToString(b))
+}
+
+// unsafeBytesToString converts a []byte to a string without a heap allocation.
+//
+// It is unsafe, and is intended to prepare input to short-lived functions
+// that require strings.
+func unsafeBytesToString(in []byte) string {
+	src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
+	dst := reflect.StringHeader{
+		Data: src.Data,
+		Len:  src.Len,
+	}
+	s := *(*string)(unsafe.Pointer(&dst))
+	return s
+}
diff --git a/vendor/github.com/influxdata/influxdb/models/points.go b/vendor/github.com/influxdata/influxdb/models/points.go
new file mode 100644
index 000000000..f4062584e
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/points.go
@@ -0,0 +1,2476 @@
+// Package models implements basic objects used throughout the TICK stack.
+package models // import "github.com/influxdata/influxdb/models"
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/influxdata/influxdb/pkg/escape"
+)
+
+// Values used to store the field key and measurement name as special internal
+// tags.
+const (
+	FieldKeyTagKey    = "\xff"
+	MeasurementTagKey = "\x00"
+)
+
+// Predefined byte representations of special tag keys.
+var (
+	FieldKeyTagKeyBytes    = []byte(FieldKeyTagKey)
+	MeasurementTagKeyBytes = []byte(MeasurementTagKey)
+)
+
+type escapeSet struct {
+	k   [1]byte
+	esc [2]byte
+}
+
+var (
+	measurementEscapeCodes = [...]escapeSet{
+		{k: [1]byte{','}, esc: [2]byte{'\\', ','}},
+		{k: [1]byte{' '}, esc: [2]byte{'\\', ' '}},
+	}
+
+	tagEscapeCodes = [...]escapeSet{
+		{k: [1]byte{','}, esc: [2]byte{'\\', ','}},
+		{k: [1]byte{' '}, esc: [2]byte{'\\', ' '}},
+		{k: [1]byte{'='}, esc: [2]byte{'\\', '='}},
+	}
+
+	// ErrPointMustHaveAField is returned when operating on a point that does not have any fields.
+	ErrPointMustHaveAField = errors.New("point without fields is unsupported")
+
+	// ErrInvalidNumber is returned when a number is expected but not provided.
+	ErrInvalidNumber = errors.New("invalid number")
+
+	// ErrInvalidPoint is returned when a point cannot be parsed correctly.
+	ErrInvalidPoint = errors.New("point is invalid")
+)
+
+const (
+	// MaxKeyLength is the largest allowed size of the combined measurement and tag keys.
+	MaxKeyLength = 65535
+)
+
+// enableUint64Support will enable uint64 support if set to true.
+var enableUint64Support = false
+
+// EnableUintSupport manually enables uint support for the point parser.
+// This function will be removed in the future and only exists for unit tests during the
+// transition.
+func EnableUintSupport() {
+	enableUint64Support = true
+}
+
+// Point defines the values that will be written to the database.
+type Point interface {
+	// Name returns the measurement name for the point.
+	Name() []byte
+
+	// SetName updates the measurement name for the point.
+	SetName(string)
+
+	// Tags returns the tag set for the point.
+	Tags() Tags
+
+	// ForEachTag iterates over each tag, invoking fn. If fn returns false, iteration stops.
+	ForEachTag(fn func(k, v []byte) bool)
+
+	// AddTag adds or replaces a tag value for a point.
+	AddTag(key, value string)
+
+	// SetTags replaces the tags for the point.
+	SetTags(tags Tags)
+
+	// HasTag returns true if the tag exists for the point.
+	HasTag(tag []byte) bool
+
+	// Fields returns the fields for the point.
+	Fields() (Fields, error)
+
+	// Time returns the timestamp for the point.
+	Time() time.Time
+
+	// SetTime updates the timestamp for the point.
+	SetTime(t time.Time)
+
+	// UnixNano returns the timestamp of the point as nanoseconds since Unix epoch.
+	UnixNano() int64
+
+	// HashID returns a non-cryptographic checksum of the point's key.
+	HashID() uint64
+
+	// Key returns the key (measurement joined with tags) of the point.
+	Key() []byte
+
+	// String returns a string representation of the point. If there is a
+	// timestamp associated with the point then it will be specified with the default
+	// precision of nanoseconds.
+	String() string
+
+	// MarshalBinary returns a binary representation of the point.
+	MarshalBinary() ([]byte, error)
+
+	// PrecisionString returns a string representation of the point. If there
+	// is a timestamp associated with the point then it will be specified in the
+	// given unit.
+	PrecisionString(precision string) string
+
+	// RoundedString returns a string representation of the point. If there
+	// is a timestamp associated with the point, then it will be rounded to the
+	// given duration.
+	RoundedString(d time.Duration) string
+
+	// Split will attempt to return multiple points with the same timestamp whose
+	// string representations are no longer than size. Points with a single field or
+	// a point without a timestamp may exceed the requested size.
+	Split(size int) []Point
+
+	// Round will round the timestamp of the point to the given duration.
+	Round(d time.Duration)
+
+	// StringSize returns the length of the string that would be returned by String().
+	StringSize() int
+
+	// AppendString appends the result of String() to the provided buffer and returns
+	// the result, potentially reducing string allocations.
+	AppendString(buf []byte) []byte
+
+	// FieldIterator returns a FieldIterator that can be used to traverse the
+	// fields of a point without constructing the in-memory map.
+	FieldIterator() FieldIterator
+}
+
+// FieldType represents the type of a field.
+type FieldType int
+
+const (
+	// Integer indicates the field's type is integer.
+	Integer FieldType = iota
+
+	// Float indicates the field's type is float.
+	Float
+
+	// Boolean indicates the field's type is boolean.
+	Boolean
+
+	// String indicates the field's type is string.
+	String
+
+	// Empty is used to indicate that there is no field.
+	Empty
+
+	// Unsigned indicates the field's type is an unsigned integer.
+	Unsigned
+)
+
+// FieldIterator provides a low-allocation interface to iterate through a point's fields.
+type FieldIterator interface {
+	// Next indicates whether there are any fields remaining.
+	Next() bool
+
+	// FieldKey returns the key of the current field.
+	FieldKey() []byte
+
+	// Type returns the FieldType of the current field.
+	Type() FieldType
+
+	// StringValue returns the string value of the current field.
+	StringValue() string
+
+	// IntegerValue returns the integer value of the current field.
+	IntegerValue() (int64, error)
+
+	// UnsignedValue returns the unsigned value of the current field.
+	UnsignedValue() (uint64, error)
+
+	// BooleanValue returns the boolean value of the current field.
+	BooleanValue() (bool, error)
+
+	// FloatValue returns the float value of the current field.
+	FloatValue() (float64, error)
+
+	// Reset resets the iterator to its initial state.
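+	//
+	// A typical iteration sketch (illustrative; pt is any Point):
+	//
+	//	it := pt.FieldIterator()
+	//	for it.Next() {
+	//		if it.Type() == Float {
+	//			v, _ := it.FloatValue()
+	//			fmt.Println(string(it.FieldKey()), v)
+	//		}
+	//	}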
+	Reset()
+}
+
+// Points represents a sortable list of points by timestamp.
+type Points []Point
+
+// Len implements sort.Interface.
+func (a Points) Len() int { return len(a) }
+
+// Less implements sort.Interface.
+func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) }
+
+// Swap implements sort.Interface.
+func (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// point is the default implementation of Point.
+type point struct {
+	time time.Time
+
+	// text encoding of measurement and tags
+	// key must always be stored sorted by tags; if the original line was not sorted,
+	// we need to re-sort it
+	key []byte
+
+	// text encoding of field data
+	fields []byte
+
+	// text encoding of timestamp
+	ts []byte
+
+	// cached version of parsed fields from data
+	cachedFields map[string]interface{}
+
+	// cached version of parsed name from key
+	cachedName string
+
+	// cached version of parsed tags
+	cachedTags Tags
+
+	it fieldIterator
+}
+
+// type assertions
+var (
+	_ Point         = (*point)(nil)
+	_ FieldIterator = (*point)(nil)
+)
+
+const (
+	// the number of characters for the largest possible int64 (9223372036854775807)
+	maxInt64Digits = 19
+
+	// the number of characters for the smallest possible int64 (-9223372036854775808)
+	minInt64Digits = 20
+
+	// the number of characters for the largest possible uint64 (18446744073709551615)
+	maxUint64Digits = 20
+
+	// the number of characters required for the largest float64 before a range check
+	// would occur during parsing
+	maxFloat64Digits = 25
+
+	// the number of characters required for the smallest float64 before a range check
+	// would occur during parsing
+	minFloat64Digits = 27
+)
+
+// ParsePoints returns a slice of Points from a text representation of a point
+// with each point separated by newlines. If any points fail to parse, a non-nil error
+// will be returned in addition to the points that parsed successfully.
+func ParsePoints(buf []byte) ([]Point, error) {
+	return ParsePointsWithPrecision(buf, time.Now().UTC(), "n")
+}
+
+// ParsePointsString is identical to ParsePoints but accepts a string.
+func ParsePointsString(buf string) ([]Point, error) {
+	return ParsePoints([]byte(buf))
+}
+
+// ParseKey returns the measurement name and tags from a point.
+//
+// NOTE: to minimize heap allocations, the returned Tags will refer to subslices of buf.
+// This can have the unintended effect of preventing buf from being garbage collected.
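+//
+// For example (an illustrative sketch; the series key is an assumption):
+//
+//	name, tags := ParseKey([]byte("cpu,host=server01,region=us-west"))
+//	// name == "cpu"; tags hold host=server01 and region=us-west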
+func ParseKey(buf []byte) (string, Tags) {
+	name, tags := ParseKeyBytes(buf)
+	return string(name), tags
+}
+
+func ParseKeyBytes(buf []byte) ([]byte, Tags) {
+	return ParseKeyBytesWithTags(buf, nil)
+}
+
+func ParseKeyBytesWithTags(buf []byte, tags Tags) ([]byte, Tags) {
+	// Ignore the error because scanMeasurement returns "missing fields" which we ignore
+	// when just parsing a key
+	state, i, _ := scanMeasurement(buf, 0)
+
+	var name []byte
+	if state == tagKeyState {
+		tags = parseTags(buf, tags)
+		// scanMeasurement returns the location of the comma if there are tags, strip that off
+		name = buf[:i-1]
+	} else {
+		name = buf[:i]
+	}
+	return unescapeMeasurement(name), tags
+}
+
+func ParseTags(buf []byte) Tags {
+	return parseTags(buf, nil)
+}
+
+func ParseName(buf []byte) []byte {
+	// Ignore the error because scanMeasurement returns "missing fields" which we ignore
+	// when just parsing a key
+	state, i, _ := scanMeasurement(buf, 0)
+	var name []byte
+	if state == tagKeyState {
+		name = buf[:i-1]
+	} else {
+		name = buf[:i]
+	}
+
+	return unescapeMeasurement(name)
+}
+
+// ParsePointsWithPrecision is similar to ParsePoints, but allows the
+// caller to provide a precision for time.
+//
+// NOTE: to minimize heap allocations, the returned Points will refer to subslices of buf.
+// This can have the unintended effect of preventing buf from being garbage collected.
+func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) {
+	points := make([]Point, 0, bytes.Count(buf, []byte{'\n'})+1)
+	var (
+		pos    int
+		block  []byte
+		failed []string
+	)
+	for pos < len(buf) {
+		pos, block = scanLine(buf, pos)
+		pos++
+
+		if len(block) == 0 {
+			continue
+		}
+
+		start := skipWhitespace(block, 0)
+
+		// If line is all whitespace, just skip it
+		if start >= len(block) {
+			continue
+		}
+
+		// lines which start with '#' are comments
+		if block[start] == '#' {
+			continue
+		}
+
+		// strip the newline if one is present
+		if block[len(block)-1] == '\n' {
+			block = block[:len(block)-1]
+		}
+
+		pt, err := parsePoint(block[start:], defaultTime, precision)
+		if err != nil {
+			failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:]), err))
+		} else {
+			points = append(points, pt)
+		}
+
+	}
+	if len(failed) > 0 {
+		return points, fmt.Errorf("%s", strings.Join(failed, "\n"))
+	}
+	return points, nil
+
+}
+
+func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) {
+	// scan the first block which is measurement[,tag1=value1,tag2=value2...]
+	pos, key, err := scanKey(buf, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	// measurement name is required
+	if len(key) == 0 {
+		return nil, fmt.Errorf("missing measurement")
+	}
+
+	if len(key) > MaxKeyLength {
+		return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
+	}
+
+	// scan the second block which is field1=value1[,field2=value2,...]
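+	// For example (illustrative), in the line
+	//   cpu,host=a value=1.5,count=3i 1609459200000000000
+	// the fields block is "value=1.5,count=3i".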
+	pos, fields, err := scanFields(buf, pos)
+	if err != nil {
+		return nil, err
+	}
+
+	// at least one field is required
+	if len(fields) == 0 {
+		return nil, fmt.Errorf("missing fields")
+	}
+
+	var maxKeyErr error
+	err = walkFields(fields, func(k, v []byte) bool {
+		if sz := seriesKeySize(key, k); sz > MaxKeyLength {
+			maxKeyErr = fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength)
+			return false
+		}
+		return true
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	if maxKeyErr != nil {
+		return nil, maxKeyErr
+	}
+
+	// scan the last block which is an optional integer timestamp
+	pos, ts, err := scanTime(buf, pos)
+	if err != nil {
+		return nil, err
+	}
+
+	pt := &point{
+		key:    key,
+		fields: fields,
+		ts:     ts,
+	}
+
+	if len(ts) == 0 {
+		pt.time = defaultTime
+		pt.SetPrecision(precision)
+	} else {
+		ts, err := parseIntBytes(ts, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		pt.time, err = SafeCalcTime(ts, precision)
+		if err != nil {
+			return nil, err
+		}
+
+		// Determine if there are illegal non-whitespace characters after the
+		// timestamp block.
+		for pos < len(buf) {
+			if buf[pos] != ' ' {
+				return nil, ErrInvalidPoint
+			}
+			pos++
+		}
+	}
+	return pt, nil
+}
+
+// GetPrecisionMultiplier will return a multiplier for the precision specified.
+func GetPrecisionMultiplier(precision string) int64 {
+	d := time.Nanosecond
+	switch precision {
+	case "u":
+		d = time.Microsecond
+	case "ms":
+		d = time.Millisecond
+	case "s":
+		d = time.Second
+	case "m":
+		d = time.Minute
+	case "h":
+		d = time.Hour
+	}
+	return int64(d)
+}
+
+// scanKey scans buf starting at i for the measurement and tag portion of the point.
+// It returns the ending position and the byte slice of key within buf. If there
+// are tags, they will be sorted if they are not already.
+func scanKey(buf []byte, i int) (int, []byte, error) {
+	start := skipWhitespace(buf, i)
+
+	i = start
+
+	// Determines whether the tags are sorted; assume they are.
+	sorted := true
+
+	// indices holds the indexes within buf of the start of each tag. For example,
+	// a buf of 'cpu,host=a,region=b,zone=c' would have an indices slice of [4,11,20]
+	// which indicates that the first tag starts at buf[4], the second at buf[11], and
+	// the last at buf[20]
+	indices := make([]int, 100)
+
+	// tracks how many commas we've seen. Since indices is an arbitrarily
+	// large slice, we need to know how many of its values are in use.
+	commas := 0
+
+	// First scan the Point's measurement.
+	state, i, err := scanMeasurement(buf, i)
+	if err != nil {
+		return i, buf[start:i], err
+	}
+
+	// Optionally scan tags if needed.
+	if state == tagKeyState {
+		i, commas, indices, err = scanTags(buf, i, indices)
+		if err != nil {
+			return i, buf[start:i], err
+		}
+	}
+
+	// Now that we know where the key region is within buf and the location of the tags, we
+	// need to determine if duplicate tags exist and if the tags are sorted. This iterates
+	// over the list comparing each tag in the sequence with each other.
+	for j := 0; j < commas-1; j++ {
+		// get the left and right tags
+		_, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=')
+		_, right := scanTo(buf[indices[j+1]:indices[j+2]-1], 0, '=')
+
+		// If left is greater than right, the tags are not sorted. We do not have to
+		// continue because the short path no longer works.
+		// If the tags are equal, then there are duplicate tags, and we should abort.
+ // If the tags are not sorted, this pass may not find duplicate tags and we + // need to do a more exhaustive search later. + if cmp := bytes.Compare(left, right); cmp > 0 { + sorted = false + break + } else if cmp == 0 { + return i, buf[start:i], fmt.Errorf("duplicate tags") + } + } + + // If the tags are not sorted, then sort them. This sort is inline and + // uses the tag indices we created earlier. The actual buffer is not sorted, the + // indices are using the buffer for value comparison. After the indices are sorted, + // the buffer is reconstructed from the sorted indices. + if !sorted && commas > 0 { + // Get the measurement name for later + measurement := buf[start : indices[0]-1] + + // Sort the indices + indices := indices[:commas] + insertionSort(0, commas, buf, indices) + + // Create a new key using the measurement and sorted indices + b := make([]byte, len(buf[start:i])) + pos := copy(b, measurement) + for _, i := range indices { + b[pos] = ',' + pos++ + _, v := scanToSpaceOr(buf, i, ',') + pos += copy(b[pos:], v) + } + + // Check again for duplicate tags now that the tags are sorted. + for j := 0; j < commas-1; j++ { + // get the left and right tags + _, left := scanTo(buf[indices[j]:], 0, '=') + _, right := scanTo(buf[indices[j+1]:], 0, '=') + + // If the tags are equal, then there are duplicate tags, and we should abort. + // If the tags are not sorted, this pass may not find duplicate tags and we + // need to do a more exhaustive search later. + if bytes.Equal(left, right) { + return i, b, fmt.Errorf("duplicate tags") + } + } + + return i, b, nil + } + + return i, buf[start:i], nil +} + +// The following constants allow us to specify which state to move to +// next, when scanning sections of a Point. +const ( + tagKeyState = iota + tagValueState + fieldsState +) + +// scanMeasurement examines the measurement part of a Point, returning +// the next state to move to, and the current location in the buffer. +func scanMeasurement(buf []byte, i int) (int, int, error) { + // Check first byte of measurement, anything except a comma is fine. + // It can't be a space, since whitespace is stripped prior to this + // function call. + if i >= len(buf) || buf[i] == ',' { + return -1, i, fmt.Errorf("missing measurement") + } + + for { + i++ + if i >= len(buf) { + // cpu + return -1, i, fmt.Errorf("missing fields") + } + + if buf[i-1] == '\\' { + // Skip character (it's escaped). + continue + } + + // Unescaped comma; move onto scanning the tags. + if buf[i] == ',' { + return tagKeyState, i + 1, nil + } + + // Unescaped space; move onto scanning the fields. + if buf[i] == ' ' { + // cpu value=1.0 + return fieldsState, i, nil + } + } +} + +// scanTags examines all the tags in a Point, keeping track of and +// returning the updated indices slice, number of commas and location +// in buf where to start examining the Point fields. +func scanTags(buf []byte, i int, indices []int) (int, int, []int, error) { + var ( + err error + commas int + state = tagKeyState + ) + + for { + switch state { + case tagKeyState: + // Grow our indices slice if we have too many tags. 
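+			// (Capacity doubles on each growth below, so a point with many
+			// tags triggers only a logarithmic number of reallocations.)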
+ if commas >= len(indices) { + newIndics := make([]int, cap(indices)*2) + copy(newIndics, indices) + indices = newIndics + } + indices[commas] = i + commas++ + + i, err = scanTagsKey(buf, i) + state = tagValueState // tag value always follows a tag key + case tagValueState: + state, i, err = scanTagsValue(buf, i) + case fieldsState: + indices[commas] = i + 1 + return i, commas, indices, nil + } + + if err != nil { + return i, commas, indices, err + } + } +} + +// scanTagsKey scans each character in a tag key. +func scanTagsKey(buf []byte, i int) (int, error) { + // First character of the key. + if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' { + // cpu,{'', ' ', ',', '='} + return i, fmt.Errorf("missing tag key") + } + + // Examine each character in the tag key until we hit an unescaped + // equals (the tag value), or we hit an error (i.e., unescaped + // space or comma). + for { + i++ + + // Either we reached the end of the buffer or we hit an + // unescaped comma or space. + if i >= len(buf) || + ((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') { + // cpu,tag{'', ' ', ','} + return i, fmt.Errorf("missing tag value") + } + + if buf[i] == '=' && buf[i-1] != '\\' { + // cpu,tag= + return i + 1, nil + } + } +} + +// scanTagsValue scans each character in a tag value. +func scanTagsValue(buf []byte, i int) (int, int, error) { + // Tag value cannot be empty. + if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' { + // cpu,tag={',', ' '} + return -1, i, fmt.Errorf("missing tag value") + } + + // Examine each character in the tag value until we hit an unescaped + // comma (move onto next tag key), an unescaped space (move onto + // fields), or we error out. + for { + i++ + if i >= len(buf) { + // cpu,tag=value + return -1, i, fmt.Errorf("missing fields") + } + + // An unescaped equals sign is an invalid tag value. + if buf[i] == '=' && buf[i-1] != '\\' { + // cpu,tag={'=', 'fo=o'} + return -1, i, fmt.Errorf("invalid tag format") + } + + if buf[i] == ',' && buf[i-1] != '\\' { + // cpu,tag=foo, + return tagKeyState, i + 1, nil + } + + // cpu,tag=foo value=1.0 + // cpu, tag=foo\= value=1.0 + if buf[i] == ' ' && buf[i-1] != '\\' { + return fieldsState, i, nil + } + } +} + +func insertionSort(l, r int, buf []byte, indices []int) { + for i := l + 1; i < r; i++ { + for j := i; j > l && less(buf, indices, j, j-1); j-- { + indices[j], indices[j-1] = indices[j-1], indices[j] + } + } +} + +func less(buf []byte, indices []int, i, j int) bool { + // This grabs the tag names for i & j, it ignores the values + _, a := scanTo(buf, indices[i], '=') + _, b := scanTo(buf, indices[j], '=') + return bytes.Compare(a, b) < 0 +} + +// scanFields scans buf, starting at i for the fields section of a point. It returns +// the ending position and the byte slice of the fields within buf. +func scanFields(buf []byte, i int) (int, []byte, error) { + start := skipWhitespace(buf, i) + i = start + quoted := false + + // tracks how many '=' we've seen + equals := 0 + + // tracks how many commas we've seen + commas := 0 + + for { + // reached the end of buf? + if i >= len(buf) { + break + } + + // escaped characters? 
+		if buf[i] == '\\' && i+1 < len(buf) {
+			i += 2
+			continue
+		}
+
+		// If the value is quoted, scan until we get to the end quote.
+		// Quotes are only significant in the field value, not in the field key.
+		if buf[i] == '"' && equals > commas {
+			quoted = !quoted
+			i++
+			continue
+		}
+
+		// If we see an =, ensure that there is at least one char before and after it
+		if buf[i] == '=' && !quoted {
+			equals++
+
+			// check for "... =123" but allow "a\ =123"
+			if buf[i-1] == ' ' && buf[i-2] != '\\' {
+				return i, buf[start:i], fmt.Errorf("missing field key")
+			}
+
+			// check for "...a=123,=456" but allow "a=123,a\,=456"
+			if buf[i-1] == ',' && buf[i-2] != '\\' {
+				return i, buf[start:i], fmt.Errorf("missing field key")
+			}
+
+			// check for "... value="
+			if i+1 >= len(buf) {
+				return i, buf[start:i], fmt.Errorf("missing field value")
+			}
+
+			// check for "... value=,value2=..."
+			if buf[i+1] == ',' || buf[i+1] == ' ' {
+				return i, buf[start:i], fmt.Errorf("missing field value")
+			}
+
+			if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
+				var err error
+				i, err = scanNumber(buf, i+1)
+				if err != nil {
+					return i, buf[start:i], err
+				}
+				continue
+			}
+			// If next byte is not a double-quote, the value must be a boolean
+			if buf[i+1] != '"' {
+				var err error
+				i, _, err = scanBoolean(buf, i+1)
+				if err != nil {
+					return i, buf[start:i], err
+				}
+				continue
+			}
+		}
+
+		if buf[i] == ',' && !quoted {
+			commas++
+		}
+
+		// reached end of block?
+		if buf[i] == ' ' && !quoted {
+			break
+		}
+		i++
+	}
+
+	if quoted {
+		return i, buf[start:i], fmt.Errorf("unbalanced quotes")
+	}
+
+	// check that all field sections had keys and values (e.g. prevent "a=1,b")
+	if equals == 0 || commas != equals-1 {
+		return i, buf[start:i], fmt.Errorf("invalid field format")
+	}
+
+	return i, buf[start:i], nil
+}
+
+// scanTime scans buf, starting at i, for the time section of a point. It
+// returns the ending position, the byte slice of the timestamp within buf,
+// and an error if the timestamp is not in the correct numeric format.
+func scanTime(buf []byte, i int) (int, []byte, error) {
+	start := skipWhitespace(buf, i)
+	i = start
+
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		// Reached end of block or trailing whitespace?
+		if buf[i] == '\n' || buf[i] == ' ' {
+			break
+		}
+
+		// Handle negative timestamps
+		if i == start && buf[i] == '-' {
+			i++
+			continue
+		}
+
+		// Timestamps should be integers, make sure they are so we don't need
+		// to actually parse the timestamp until needed.
+		if buf[i] < '0' || buf[i] > '9' {
+			return i, buf[start:i], fmt.Errorf("bad timestamp")
+		}
+		i++
+	}
+	return i, buf[start:i], nil
+}
+
+func isNumeric(b byte) bool {
+	return (b >= '0' && b <= '9') || b == '.'
+}
+
+// scanNumber returns the end position within buf, starting at i, after
+// scanning over buf for an integer or float. It returns an
+// error if an invalid number is scanned.
+func scanNumber(buf []byte, i int) (int, error) {
+	start := i
+	var isInt, isUnsigned bool
+
+	// Is negative number?
+	if i < len(buf) && buf[i] == '-' {
+		i++
+		// There must be more characters now, as just '-' is illegal.
+		if i == len(buf) {
+			return i, ErrInvalidNumber
+		}
+	}
+
+	// tracks whether we've seen a decimal point
+	decimal := false
+
+	// indicates the number is a float in scientific notation
+	scientific := false
+
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		if buf[i] == ',' || buf[i] == ' ' {
+			break
+		}
+
+		if buf[i] == 'i' && i > start && !(isInt || isUnsigned) {
+			isInt = true
+			i++
+			continue
+		} else if buf[i] == 'u' && i > start && !(isInt || isUnsigned) {
+			isUnsigned = true
+			i++
+			continue
+		}
+
+		if buf[i] == '.' {
+			// Can't have more than 1 decimal (e.g. 1.1.1 should fail)
+			if decimal {
+				return i, ErrInvalidNumber
+			}
+			decimal = true
+		}
+
+		// `e` is valid for floats but not as the first char
+		if i > start && (buf[i] == 'e' || buf[i] == 'E') {
+			scientific = true
+			i++
+			continue
+		}
+
+		// + and - are only valid at this point if they follow an e (scientific notation)
+		if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
+			i++
+			continue
+		}
+
+		// NaN is an unsupported value
+		if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
+			return i, ErrInvalidNumber
+		}
+
+		if !isNumeric(buf[i]) {
+			return i, ErrInvalidNumber
+		}
+		i++
+	}
+
+	if (isInt || isUnsigned) && (decimal || scientific) {
+		return i, ErrInvalidNumber
+	}
+
+	numericDigits := i - start
+	if isInt {
+		numericDigits--
+	}
+	if decimal {
+		numericDigits--
+	}
+	if buf[start] == '-' {
+		numericDigits--
+	}
+
+	if numericDigits == 0 {
+		return i, ErrInvalidNumber
+	}
+
+	// It's more common that numbers will be within min/max range for their type but we need to prevent
+	// out of range numbers from being parsed successfully. This uses some simple heuristics to decide
+	// if we should parse the number to the actual type. It does not do it all the time because it incurs
+	// extra allocations and we end up converting the type again when writing points to disk.
+	if isInt {
+		// Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
+		if buf[i-1] != 'i' {
+			return i, ErrInvalidNumber
+		}
+		// Parse the int to check bounds; the number of digits could be larger than the max range.
+		// We subtract 1 from the index to remove the `i` from our tests.
+		if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
+			if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil {
+				return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err)
+			}
+		}
+	} else if isUnsigned {
+		// Return an error if uint64 support has not been enabled.
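+		// (Unsigned support is opt-in: uint_support.go in this package enables it
+		// when building with the `uint` or `uint64` build tag.)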
+		if !enableUint64Support {
+			return i, ErrInvalidNumber
+		}
+		// Make sure the last char is a 'u' for unsigned
+		if buf[i-1] != 'u' {
+			return i, ErrInvalidNumber
+		}
+		// Make sure the first char is not a '-' for unsigned
+		if buf[start] == '-' {
+			return i, ErrInvalidNumber
+		}
+		// Parse the uint to check bounds; the number of digits could be larger than the max range.
+		// We subtract 1 from the index to remove the `u` from our tests.
+		if len(buf[start:i-1]) >= maxUint64Digits {
+			if _, err := parseUintBytes(buf[start:i-1], 10, 64); err != nil {
+				return i, fmt.Errorf("unable to parse unsigned %s: %s", buf[start:i-1], err)
+			}
+		}
+	} else {
+		// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
+		if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
+			if _, err := parseFloatBytes(buf[start:i], 10); err != nil {
+				return i, fmt.Errorf("invalid float")
+			}
+		}
+	}
+
+	return i, nil
+}
+
+// scanBoolean returns the end position within buf, starting at i, after
+// scanning over buf for a boolean. Valid values for a boolean are
+// t, T, true, TRUE, f, F, false, FALSE. It returns an error if an invalid boolean
+// is scanned.
+func scanBoolean(buf []byte, i int) (int, []byte, error) {
+	start := i
+
+	if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	i++
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		if buf[i] == ',' || buf[i] == ' ' {
+			break
+		}
+		i++
+	}
+
+	// Single char bool (t, T, f, F) is ok
+	if i-start == 1 {
+		return i, buf[start:i], nil
+	}
+
+	// length must be 4 for true or TRUE
+	if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	// length must be 5 for false or FALSE
+	if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	// Otherwise
+	valid := false
+	switch buf[start] {
+	case 't':
+		valid = bytes.Equal(buf[start:i], []byte("true"))
+	case 'f':
+		valid = bytes.Equal(buf[start:i], []byte("false"))
+	case 'T':
+		valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
+	case 'F':
+		valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
+	}
+
+	if !valid {
+		return i, buf[start:i], fmt.Errorf("invalid boolean")
+	}
+
+	return i, buf[start:i], nil
+
+}
+
+// skipWhitespace returns the end position within buf, starting at i, after
+// scanning over spaces in tags.
+func skipWhitespace(buf []byte, i int) int {
+	for i < len(buf) {
+		if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
+			break
+		}
+		i++
+	}
+	return i
+}
+
+// scanLine returns the end position in buf and the next line found within
+// buf.
+func scanLine(buf []byte, i int) (int, []byte) {
+	start := i
+	quoted := false
+	fields := false
+
+	// tracks how many '=' and commas we've seen
+	// this duplicates some of the functionality in scanFields
+	equals := 0
+	commas := 0
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		// skip past escaped characters
+		if buf[i] == '\\' && i+2 < len(buf) {
+			i += 2
+			continue
+		}
+
+		if buf[i] == ' ' {
+			fields = true
+		}
+
+		// If we see a double quote, make sure it is not escaped
+		if fields {
+			if !quoted && buf[i] == '=' {
+				i++
+				equals++
+				continue
+			} else if !quoted && buf[i] == ',' {
+				i++
+				commas++
+				continue
+			} else if buf[i] == '"' && equals > commas {
+				i++
+				quoted = !quoted
+				continue
+			}
+		}
+
+		if buf[i] == '\n' && !quoted {
+			break
+		}
+
+		i++
+	}
+
+	return i, buf[start:i]
+}
+
+// scanTo returns the end position in buf and the next consecutive block
+// of bytes, starting from i and ending with stop byte, where stop byte
+// has not been escaped.
+//
+// If there are leading spaces, they are skipped.
+func scanTo(buf []byte, i int, stop byte) (int, []byte) {
+	start := i
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		// Reached unescaped stop value?
+		if buf[i] == stop && (i == 0 || buf[i-1] != '\\') {
+			break
+		}
+		i++
+	}
+
+	return i, buf[start:i]
+}
+
+// scanToSpaceOr returns the end position in buf and the next consecutive block
+// of bytes, starting from i and ending with an unescaped stop byte or a space.
+// If there are leading spaces, they are skipped.
+func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) {
+	start := i
+	if buf[i] == stop || buf[i] == ' ' {
+		return i, buf[start:i]
+	}
+
+	for {
+		i++
+		if buf[i-1] == '\\' {
+			continue
+		}
+
+		// reached the end of buf?
+		if i >= len(buf) {
+			return i, buf[start:i]
+		}
+
+		// reached end of block?
+		if buf[i] == stop || buf[i] == ' ' {
+			return i, buf[start:i]
+		}
+	}
+}
+
+func scanTagValue(buf []byte, i int) (int, []byte) {
+	start := i
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		if buf[i] == ',' && buf[i-1] != '\\' {
+			break
+		}
+		i++
+	}
+	if i > len(buf) {
+		return i, nil
+	}
+	return i, buf[start:i]
+}
+
+func scanFieldValue(buf []byte, i int) (int, []byte) {
+	start := i
+	quoted := false
+	for i < len(buf) {
+		// The only escape chars for a field value are double-quote and backslash
+		if buf[i] == '\\' && i+1 < len(buf) && (buf[i+1] == '"' || buf[i+1] == '\\') {
+			i += 2
+			continue
+		}
+
+		// Quoted value? (e.g. string)
+		if buf[i] == '"' {
+			i++
+			quoted = !quoted
+			continue
+		}
+
+		if buf[i] == ',' && !quoted {
+			break
+		}
+		i++
+	}
+	return i, buf[start:i]
+}
+
+func EscapeMeasurement(in []byte) []byte {
+	for _, c := range measurementEscapeCodes {
+		if bytes.IndexByte(in, c.k[0]) != -1 {
+			in = bytes.Replace(in, c.k[:], c.esc[:], -1)
+		}
+	}
+	return in
+}
+
+func unescapeMeasurement(in []byte) []byte {
+	if bytes.IndexByte(in, '\\') == -1 {
+		return in
+	}
+
+	for i := range measurementEscapeCodes {
+		c := &measurementEscapeCodes[i]
+		if bytes.IndexByte(in, c.k[0]) != -1 {
+			in = bytes.Replace(in, c.esc[:], c.k[:], -1)
+		}
+	}
+	return in
+}
+
+func escapeTag(in []byte) []byte {
+	for i := range tagEscapeCodes {
+		c := &tagEscapeCodes[i]
+		if bytes.IndexByte(in, c.k[0]) != -1 {
+			in = bytes.Replace(in, c.k[:], c.esc[:], -1)
+		}
+	}
+	return in
+}
+
+func unescapeTag(in []byte) []byte {
+	if bytes.IndexByte(in, '\\') == -1 {
+		return in
+	}
+
+	for i := range tagEscapeCodes {
+		c := &tagEscapeCodes[i]
+		if bytes.IndexByte(in, c.k[0]) != -1 {
+			in = bytes.Replace(in, c.esc[:], c.k[:], -1)
+		}
+	}
+	return in
+}
+
+// escapeStringFieldReplacer replaces double quotes and backslashes
+// with the same character preceded by a backslash.
+// As of Go 1.7 this benchmarked better in allocations and CPU time
+// compared to iterating through a string byte-by-byte and appending to a new byte slice,
+// calling strings.Replace twice, and better than (*Regex).ReplaceAllString.
+var escapeStringFieldReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`)
+
+// EscapeStringField returns a copy of in with any double quotes or
+// backslashes replaced with escaped values.
+func EscapeStringField(in string) string {
+	return escapeStringFieldReplacer.Replace(in)
+}
+
+// unescapeStringField returns a copy of in with any escaped double-quotes
+// or backslashes unescaped.
+func unescapeStringField(in string) string {
+	if strings.IndexByte(in, '\\') == -1 {
+		return in
+	}
+
+	var out []byte
+	i := 0
+	for {
+		if i >= len(in) {
+			break
+		}
+		// unescape backslashes
+		if in[i] == '\\' && i+1 < len(in) && in[i+1] == '\\' {
+			out = append(out, '\\')
+			i += 2
+			continue
+		}
+		// unescape double-quotes
+		if in[i] == '\\' && i+1 < len(in) && in[i+1] == '"' {
+			out = append(out, '"')
+			i += 2
+			continue
+		}
+		out = append(out, in[i])
+		i++
+
+	}
+	return string(out)
+}
+
+// NewPoint returns a new point with the given measurement name, tags, fields and timestamp. If
+// an unsupported field value (NaN or +/-Inf) or out of range time is passed, this function
+// returns an error.
+func NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error) {
+	key, err := pointKey(name, tags, fields, t)
+	if err != nil {
+		return nil, err
+	}
+
+	return &point{
+		key:    key,
+		time:   t,
+		fields: fields.MarshalBinary(),
+	}, nil
+}
+
+// pointKey checks some basic requirements for valid points, and returns the
+// key, along with a possible error.
+func pointKey(measurement string, tags Tags, fields Fields, t time.Time) ([]byte, error) {
+	if len(fields) == 0 {
+		return nil, ErrPointMustHaveAField
+	}
+
+	if !t.IsZero() {
+		if err := CheckTime(t); err != nil {
+			return nil, err
+		}
+	}
+
+	for key, value := range fields {
+		switch value := value.(type) {
+		case float64:
+			// Ensure the caller validates and handles invalid field values
+			if math.IsInf(value, 0) {
+				return nil, fmt.Errorf("+/-Inf is an unsupported value for field %s", key)
+			}
+			if math.IsNaN(value) {
+				return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
+			}
+		case float32:
+			// Ensure the caller validates and handles invalid field values
+			if math.IsInf(float64(value), 0) {
+				return nil, fmt.Errorf("+/-Inf is an unsupported value for field %s", key)
+			}
+			if math.IsNaN(float64(value)) {
+				return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
+			}
+		}
+		if len(key) == 0 {
+			return nil, fmt.Errorf("all fields must have non-empty names")
+		}
+	}
+
+	key := MakeKey([]byte(measurement), tags)
+	for field := range fields {
+		sz := seriesKeySize(key, []byte(field))
+		if sz > MaxKeyLength {
+			return nil, fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength)
+		}
+	}
+
+	return key, nil
+}
+
+func seriesKeySize(key, field []byte) int {
+	// 4 is the length of the tsm1.fieldKeySeparator constant. It's inlined here to avoid a circular
+	// dependency.
+	return len(key) + 4 + len(field)
+}
+
+// NewPointFromBytes returns a new Point from a marshalled Point.
+func NewPointFromBytes(b []byte) (Point, error) {
+	p := &point{}
+	if err := p.UnmarshalBinary(b); err != nil {
+		return nil, err
+	}
+
+	// This does some basic validation to ensure there are fields and they
+	// can be unmarshalled as well.
+ iter := p.FieldIterator() + var hasField bool + for iter.Next() { + if len(iter.FieldKey()) == 0 { + continue + } + hasField = true + switch iter.Type() { + case Float: + _, err := iter.FloatValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + case Integer: + _, err := iter.IntegerValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + case Unsigned: + _, err := iter.UnsignedValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + case String: + // Skip since this won't return an error + case Boolean: + _, err := iter.BooleanValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + } + } + + if !hasField { + return nil, ErrPointMustHaveAField + } + + return p, nil +} + +// MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp. If +// an unsupported field value (NaN) is passed, this function panics. +func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point { + pt, err := NewPoint(name, tags, fields, time) + if err != nil { + panic(err.Error()) + } + return pt +} + +// Key returns the key (measurement joined with tags) of the point. +func (p *point) Key() []byte { + return p.key +} + +func (p *point) name() []byte { + _, name := scanTo(p.key, 0, ',') + return name +} + +func (p *point) Name() []byte { + return escape.Unescape(p.name()) +} + +// SetName updates the measurement name for the point. +func (p *point) SetName(name string) { + p.cachedName = "" + p.key = MakeKey([]byte(name), p.Tags()) +} + +// Time return the timestamp for the point. +func (p *point) Time() time.Time { + return p.time +} + +// SetTime updates the timestamp for the point. +func (p *point) SetTime(t time.Time) { + p.time = t +} + +// Round will round the timestamp of the point to the given duration. +func (p *point) Round(d time.Duration) { + p.time = p.time.Round(d) +} + +// Tags returns the tag set for the point. +func (p *point) Tags() Tags { + if p.cachedTags != nil { + return p.cachedTags + } + p.cachedTags = parseTags(p.key, nil) + return p.cachedTags +} + +func (p *point) ForEachTag(fn func(k, v []byte) bool) { + walkTags(p.key, fn) +} + +func (p *point) HasTag(tag []byte) bool { + if len(p.key) == 0 { + return false + } + + var exists bool + walkTags(p.key, func(key, value []byte) bool { + if bytes.Equal(tag, key) { + exists = true + return false + } + return true + }) + + return exists +} + +func walkTags(buf []byte, fn func(key, value []byte) bool) { + if len(buf) == 0 { + return + } + + pos, name := scanTo(buf, 0, ',') + + // it's an empty key, so there are no tags + if len(name) == 0 { + return + } + + hasEscape := bytes.IndexByte(buf, '\\') != -1 + i := pos + 1 + var key, value []byte + for { + if i >= len(buf) { + break + } + i, key = scanTo(buf, i, '=') + i, value = scanTagValue(buf, i+1) + + if len(value) == 0 { + continue + } + + if hasEscape { + if !fn(unescapeTag(key), unescapeTag(value)) { + return + } + } else { + if !fn(key, value) { + return + } + } + + i++ + } +} + +// walkFields walks each field key and value via fn. If fn returns false, the iteration +// is stopped. The values are the raw byte slices and not the converted types. 
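+//
+// A minimal usage sketch (illustrative, not upstream code):
+//
+//	_ = walkFields([]byte("value=1.5,count=3i"), func(k, v []byte) bool {
+//		fmt.Printf("%s -> %s\n", k, v) // prints raw, unconverted bytes
+//		return true                    // keep iterating
+//	})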
+func walkFields(buf []byte, fn func(key, value []byte) bool) error {
+	var i int
+	var key, val []byte
+	for len(buf) > 0 {
+		i, key = scanTo(buf, 0, '=')
+		if i > len(buf)-2 {
+			return fmt.Errorf("invalid value: field-key=%s", key)
+		}
+		buf = buf[i+1:]
+		i, val = scanFieldValue(buf, 0)
+		buf = buf[i:]
+		if !fn(key, val) {
+			break
+		}
+
+		// slice off comma
+		if len(buf) > 0 {
+			buf = buf[1:]
+		}
+	}
+	return nil
+}
+
+// parseTags parses buf into the provided destination tags, returning destination
+// Tags, which may have a different length and capacity.
+func parseTags(buf []byte, dst Tags) Tags {
+	if len(buf) == 0 {
+		return nil
+	}
+
+	n := bytes.Count(buf, []byte(","))
+	if cap(dst) < n {
+		dst = make(Tags, n)
+	} else {
+		dst = dst[:n]
+	}
+
+	// Ensure existing behaviour when point has no tags and nil slice passed in.
+	if dst == nil {
+		dst = Tags{}
+	}
+
+	// Series keys can contain escaped commas, therefore the number of commas
+	// in a series key only gives an estimation of the upper bound on the number
+	// of tags.
+	var i int
+	walkTags(buf, func(key, value []byte) bool {
+		dst[i].Key, dst[i].Value = key, value
+		i++
+		return true
+	})
+	return dst[:i]
+}
+
+// MakeKey creates a key for a set of tags.
+func MakeKey(name []byte, tags Tags) []byte {
+	return AppendMakeKey(nil, name, tags)
+}
+
+// AppendMakeKey appends the key derived from name and tags to dst and returns the extended buffer.
+func AppendMakeKey(dst []byte, name []byte, tags Tags) []byte {
+	// unescape the name and then re-escape it to avoid double escaping.
+	// The key should always be stored in escaped form.
+	dst = append(dst, EscapeMeasurement(unescapeMeasurement(name))...)
+	dst = tags.AppendHashKey(dst)
+	return dst
+}
+
+// SetTags replaces the tags for the point.
+func (p *point) SetTags(tags Tags) {
+	p.key = MakeKey(p.Name(), tags)
+	p.cachedTags = tags
+}
+
+// AddTag adds or replaces a tag value for a point.
+func (p *point) AddTag(key, value string) {
+	tags := p.Tags()
+	tags = append(tags, Tag{Key: []byte(key), Value: []byte(value)})
+	sort.Sort(tags)
+	p.cachedTags = tags
+	p.key = MakeKey(p.Name(), tags)
+}
+
+// Fields returns the fields for the point.
+func (p *point) Fields() (Fields, error) {
+	if p.cachedFields != nil {
+		return p.cachedFields, nil
+	}
+	cf, err := p.unmarshalBinary()
+	if err != nil {
+		return nil, err
+	}
+	p.cachedFields = cf
+	return p.cachedFields, nil
+}
+
+// SetPrecision truncates the timestamp of the point to the specified precision.
+func (p *point) SetPrecision(precision string) {
+	switch precision {
+	case "n":
+	case "u":
+		p.SetTime(p.Time().Truncate(time.Microsecond))
+	case "ms":
+		p.SetTime(p.Time().Truncate(time.Millisecond))
+	case "s":
+		p.SetTime(p.Time().Truncate(time.Second))
+	case "m":
+		p.SetTime(p.Time().Truncate(time.Minute))
+	case "h":
+		p.SetTime(p.Time().Truncate(time.Hour))
+	}
+}
+
+// String returns the string representation of the point.
+func (p *point) String() string {
+	if p.Time().IsZero() {
+		return string(p.Key()) + " " + string(p.fields)
+	}
+	return string(p.Key()) + " " + string(p.fields) + " " + strconv.FormatInt(p.UnixNano(), 10)
+}
+
+// AppendString appends the string representation of the point to buf.
+func (p *point) AppendString(buf []byte) []byte {
+	buf = append(buf, p.key...)
+	buf = append(buf, ' ')
+	buf = append(buf, p.fields...)
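+	// p.fields is already the text encoding of the field data, so it is
+	// appended verbatim; only the optional timestamp block remains.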
+ + if !p.time.IsZero() { + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, p.UnixNano(), 10) + } + + return buf +} + +// StringSize returns the length of the string that would be returned by String(). +func (p *point) StringSize() int { + size := len(p.key) + len(p.fields) + 1 + + if !p.time.IsZero() { + digits := 1 // even "0" has one digit + t := p.UnixNano() + if t < 0 { + // account for negative sign, then negate + digits++ + t = -t + } + for t > 9 { // already accounted for one digit + digits++ + t /= 10 + } + size += digits + 1 // digits and a space + } + + return size +} + +// MarshalBinary returns a binary representation of the point. +func (p *point) MarshalBinary() ([]byte, error) { + if len(p.fields) == 0 { + return nil, ErrPointMustHaveAField + } + + tb, err := p.time.MarshalBinary() + if err != nil { + return nil, err + } + + b := make([]byte, 8+len(p.key)+len(p.fields)+len(tb)) + i := 0 + + binary.BigEndian.PutUint32(b[i:], uint32(len(p.key))) + i += 4 + + i += copy(b[i:], p.key) + + binary.BigEndian.PutUint32(b[i:i+4], uint32(len(p.fields))) + i += 4 + + i += copy(b[i:], p.fields) + + copy(b[i:], tb) + return b, nil +} + +// UnmarshalBinary decodes a binary representation of the point into a point struct. +func (p *point) UnmarshalBinary(b []byte) error { + var n int + + // Read key length. + if len(b) < 4 { + return io.ErrShortBuffer + } + n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:] + + // Read key. + if len(b) < n { + return io.ErrShortBuffer + } + p.key, b = b[:n], b[n:] + + // Read fields length. + if len(b) < 4 { + return io.ErrShortBuffer + } + n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:] + + // Read fields. + if len(b) < n { + return io.ErrShortBuffer + } + p.fields, b = b[:n], b[n:] + + // Read timestamp. + return p.time.UnmarshalBinary(b) +} + +// PrecisionString returns a string representation of the point. If there +// is a timestamp associated with the point then it will be specified in the +// given unit. +func (p *point) PrecisionString(precision string) string { + if p.Time().IsZero() { + return fmt.Sprintf("%s %s", p.Key(), string(p.fields)) + } + return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields), + p.UnixNano()/GetPrecisionMultiplier(precision)) +} + +// RoundedString returns a string representation of the point. If there +// is a timestamp associated with the point, then it will be rounded to the +// given duration. 
+func (p *point) RoundedString(d time.Duration) string {
+	if p.Time().IsZero() {
+		return fmt.Sprintf("%s %s", p.Key(), string(p.fields))
+	}
+	return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields),
+		p.time.Round(d).UnixNano())
+}
+
+func (p *point) unmarshalBinary() (Fields, error) {
+	iter := p.FieldIterator()
+	fields := make(Fields, 8)
+	for iter.Next() {
+		if len(iter.FieldKey()) == 0 {
+			continue
+		}
+		switch iter.Type() {
+		case Float:
+			v, err := iter.FloatValue()
+			if err != nil {
+				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+			}
+			fields[string(iter.FieldKey())] = v
+		case Integer:
+			v, err := iter.IntegerValue()
+			if err != nil {
+				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+			}
+			fields[string(iter.FieldKey())] = v
+		case Unsigned:
+			v, err := iter.UnsignedValue()
+			if err != nil {
+				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+			}
+			fields[string(iter.FieldKey())] = v
+		case String:
+			fields[string(iter.FieldKey())] = iter.StringValue()
+		case Boolean:
+			v, err := iter.BooleanValue()
+			if err != nil {
+				return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
+			}
+			fields[string(iter.FieldKey())] = v
+		}
+	}
+	return fields, nil
+}
+
+// HashID returns a non-cryptographic checksum of the point's key.
+func (p *point) HashID() uint64 {
+	h := NewInlineFNV64a()
+	h.Write(p.key)
+	sum := h.Sum64()
+	return sum
+}
+
+// UnixNano returns the timestamp of the point as nanoseconds since Unix epoch.
+func (p *point) UnixNano() int64 {
+	return p.Time().UnixNano()
+}
+
+// Split will attempt to return multiple points with the same timestamp whose
+// string representations are no longer than size. Points with a single field or
+// a point without a timestamp may exceed the requested size.
+func (p *point) Split(size int) []Point {
+	if p.time.IsZero() || p.StringSize() <= size {
+		return []Point{p}
+	}
+
+	// key string, timestamp string, spaces
+	size -= len(p.key) + len(strconv.FormatInt(p.time.UnixNano(), 10)) + 2
+
+	var points []Point
+	var start, cur int
+
+	for cur < len(p.fields) {
+		end, _ := scanTo(p.fields, cur, '=')
+		end, _ = scanFieldValue(p.fields, end+1)
+
+		if cur > start && end-start > size {
+			points = append(points, &point{
+				key:    p.key,
+				time:   p.time,
+				fields: p.fields[start : cur-1],
+			})
+			start = cur
+		}
+
+		cur = end + 1
+	}
+
+	points = append(points, &point{
+		key:    p.key,
+		time:   p.time,
+		fields: p.fields[start:],
+	})
+
+	return points
+}
+
+// Tag represents a single key/value tag pair.
+type Tag struct {
+	Key   []byte
+	Value []byte
+}
+
+// NewTag returns a new Tag.
+func NewTag(key, value []byte) Tag {
+	return Tag{
+		Key:   key,
+		Value: value,
+	}
+}
+
+// Size returns the size of the key and value.
+func (t Tag) Size() int { return len(t.Key) + len(t.Value) }
+
+// Clone returns a copy of Tag with newly allocated Key and Value byte slices.
+//
+// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed.
+// Use Clone to create a Tag with new byte slices that do not refer to the argument to ParsePointsWithPrecision.
+func (t Tag) Clone() Tag {
+	other := Tag{
+		Key:   make([]byte, len(t.Key)),
+		Value: make([]byte, len(t.Value)),
+	}
+
+	copy(other.Key, t.Key)
+	copy(other.Value, t.Value)
+
+	return other
+}
+
+// String returns the string representation of the tag.
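+// For example (illustrative): NewTag([]byte("host"), []byte("serverA")).String()
+// returns "{host serverA}".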
+func (t *Tag) String() string { + var buf bytes.Buffer + buf.WriteByte('{') + buf.WriteString(string(t.Key)) + buf.WriteByte(' ') + buf.WriteString(string(t.Value)) + buf.WriteByte('}') + return buf.String() +} + +// Tags represents a sorted list of tags. +type Tags []Tag + +// NewTags returns a new Tags from a map. +func NewTags(m map[string]string) Tags { + if len(m) == 0 { + return nil + } + a := make(Tags, 0, len(m)) + for k, v := range m { + a = append(a, NewTag([]byte(k), []byte(v))) + } + sort.Sort(a) + return a +} + +// Keys returns the list of keys for a tag set. +func (a Tags) Keys() []string { + if len(a) == 0 { + return nil + } + keys := make([]string, len(a)) + for i, tag := range a { + keys[i] = string(tag.Key) + } + return keys +} + +// Values returns the list of values for a tag set. +func (a Tags) Values() []string { + if len(a) == 0 { + return nil + } + values := make([]string, len(a)) + for i, tag := range a { + values[i] = string(tag.Value) + } + return values +} + +// String returns the string representation of the tags. +func (a Tags) String() string { + var buf bytes.Buffer + buf.WriteByte('[') + for i := range a { + buf.WriteString(a[i].String()) + if i < len(a)-1 { + buf.WriteByte(' ') + } + } + buf.WriteByte(']') + return buf.String() +} + +// Size returns the number of bytes needed to store all tags. Note, this is +// the number of bytes needed to store all keys and values and does not account +// for data structures or delimiters for example. +func (a Tags) Size() int { + var total int + for i := range a { + total += a[i].Size() + } + return total +} + +// Clone returns a copy of the slice where the elements are a result of calling `Clone` on the original elements +// +// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed. +// Use Clone to create Tags with new byte slices that do not refer to the argument to ParsePointsWithPrecision. +func (a Tags) Clone() Tags { + if len(a) == 0 { + return nil + } + + others := make(Tags, len(a)) + for i := range a { + others[i] = a[i].Clone() + } + + return others +} + +func (a Tags) Len() int { return len(a) } +func (a Tags) Less(i, j int) bool { return bytes.Compare(a[i].Key, a[j].Key) == -1 } +func (a Tags) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// Equal returns true if a equals other. +func (a Tags) Equal(other Tags) bool { + if len(a) != len(other) { + return false + } + for i := range a { + if !bytes.Equal(a[i].Key, other[i].Key) || !bytes.Equal(a[i].Value, other[i].Value) { + return false + } + } + return true +} + +// CompareTags returns -1 if a < b, 1 if a > b, and 0 if a == b. +func CompareTags(a, b Tags) int { + // Compare each key & value until a mismatch. + for i := 0; i < len(a) && i < len(b); i++ { + if cmp := bytes.Compare(a[i].Key, b[i].Key); cmp != 0 { + return cmp + } + if cmp := bytes.Compare(a[i].Value, b[i].Value); cmp != 0 { + return cmp + } + } + + // If all tags are equal up to this point then return shorter tagset. + if len(a) < len(b) { + return -1 + } else if len(a) > len(b) { + return 1 + } + + // All tags are equal. + return 0 +} + +// Get returns the value for a key. +func (a Tags) Get(key []byte) []byte { + // OPTIMIZE: Use sort.Search if tagset is large. + + for _, t := range a { + if bytes.Equal(t.Key, key) { + return t.Value + } + } + return nil +} + +// GetString returns the string value for a string key. 
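+// A missing key yields the empty string, since Get returns nil for it.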
+func (a Tags) GetString(key string) string { + return string(a.Get([]byte(key))) +} + +// Set sets the value for a key. +func (a *Tags) Set(key, value []byte) { + for i, t := range *a { + if bytes.Equal(t.Key, key) { + (*a)[i].Value = value + return + } + } + *a = append(*a, Tag{Key: key, Value: value}) + sort.Sort(*a) +} + +// SetString sets the string value for a string key. +func (a *Tags) SetString(key, value string) { + a.Set([]byte(key), []byte(value)) +} + +// Delete removes a tag by key. +func (a *Tags) Delete(key []byte) { + for i, t := range *a { + if bytes.Equal(t.Key, key) { + copy((*a)[i:], (*a)[i+1:]) + (*a)[len(*a)-1] = Tag{} + *a = (*a)[:len(*a)-1] + return + } + } +} + +// Map returns a map representation of the tags. +func (a Tags) Map() map[string]string { + m := make(map[string]string, len(a)) + for _, t := range a { + m[string(t.Key)] = string(t.Value) + } + return m +} + +// Merge merges the tags combining the two. If both define a tag with the +// same key, the merged value overwrites the old value. +// A new map is returned. +func (a Tags) Merge(other map[string]string) Tags { + merged := make(map[string]string, len(a)+len(other)) + for _, t := range a { + merged[string(t.Key)] = string(t.Value) + } + for k, v := range other { + merged[k] = v + } + return NewTags(merged) +} + +// HashKey hashes all of a tag's keys. +func (a Tags) HashKey() []byte { + return a.AppendHashKey(nil) +} + +func (a Tags) needsEscape() bool { + for i := range a { + t := &a[i] + for j := range tagEscapeCodes { + c := &tagEscapeCodes[j] + if bytes.IndexByte(t.Key, c.k[0]) != -1 || bytes.IndexByte(t.Value, c.k[0]) != -1 { + return true + } + } + } + return false +} + +// AppendHashKey appends the result of hashing all of a tag's keys and values to dst and returns the extended buffer. +func (a Tags) AppendHashKey(dst []byte) []byte { + // Empty maps marshal to empty bytes. + if len(a) == 0 { + return dst + } + + // Type invariant: Tags are sorted + + sz := 0 + var escaped Tags + if a.needsEscape() { + var tmp [20]Tag + if len(a) < len(tmp) { + escaped = tmp[:len(a)] + } else { + escaped = make(Tags, len(a)) + } + + for i := range a { + t := &a[i] + nt := &escaped[i] + nt.Key = escapeTag(t.Key) + nt.Value = escapeTag(t.Value) + sz += len(nt.Key) + len(nt.Value) + } + } else { + sz = a.Size() + escaped = a + } + + sz += len(escaped) + (len(escaped) * 2) // separators + + // Generate marshaled bytes. + if cap(dst)-len(dst) < sz { + nd := make([]byte, len(dst), len(dst)+sz) + copy(nd, dst) + dst = nd + } + buf := dst[len(dst) : len(dst)+sz] + idx := 0 + for i := range escaped { + k := &escaped[i] + if len(k.Value) == 0 { + continue + } + buf[idx] = ',' + idx++ + copy(buf[idx:], k.Key) + idx += len(k.Key) + buf[idx] = '=' + idx++ + copy(buf[idx:], k.Value) + idx += len(k.Value) + } + return dst[:len(dst)+idx] +} + +// CopyTags returns a shallow copy of tags. +func CopyTags(a Tags) Tags { + other := make(Tags, len(a)) + copy(other, a) + return other +} + +// DeepCopyTags returns a deep copy of tags. +func DeepCopyTags(a Tags) Tags { + // Calculate size of keys/values in bytes. + var n int + for _, t := range a { + n += len(t.Key) + len(t.Value) + } + + // Build single allocation for all key/values. + buf := make([]byte, n) + + // Copy tags to new set. 
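+	// Each key and value is re-sliced out of the single buf allocation above,
+	// so the deep copy costs one allocation for buf plus one for the Tags slice.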
+	other := make(Tags, len(a))
+	for i, t := range a {
+		copy(buf, t.Key)
+		other[i].Key, buf = buf[:len(t.Key)], buf[len(t.Key):]
+
+		copy(buf, t.Value)
+		other[i].Value, buf = buf[:len(t.Value)], buf[len(t.Value):]
+	}
+
+	return other
+}
+
+// Fields represents a mapping between a Point's field names and their
+// values.
+type Fields map[string]interface{}
+
+// FieldIterator returns a FieldIterator that can be used to traverse the
+// fields of a point without constructing the in-memory map.
+func (p *point) FieldIterator() FieldIterator {
+	p.Reset()
+	return p
+}
+
+type fieldIterator struct {
+	start, end  int
+	key, keybuf []byte
+	valueBuf    []byte
+	fieldType   FieldType
+}
+
+// Next indicates whether there are any fields remaining.
+func (p *point) Next() bool {
+	p.it.start = p.it.end
+	if p.it.start >= len(p.fields) {
+		return false
+	}
+
+	p.it.end, p.it.key = scanTo(p.fields, p.it.start, '=')
+	if escape.IsEscaped(p.it.key) {
+		p.it.keybuf = escape.AppendUnescaped(p.it.keybuf[:0], p.it.key)
+		p.it.key = p.it.keybuf
+	}
+
+	p.it.end, p.it.valueBuf = scanFieldValue(p.fields, p.it.end+1)
+	p.it.end++
+
+	if len(p.it.valueBuf) == 0 {
+		p.it.fieldType = Empty
+		return true
+	}
+
+	c := p.it.valueBuf[0]
+
+	if c == '"' {
+		p.it.fieldType = String
+		return true
+	}
+
+	if strings.IndexByte(`0123456789-.nNiIu`, c) >= 0 {
+		if p.it.valueBuf[len(p.it.valueBuf)-1] == 'i' {
+			p.it.fieldType = Integer
+			p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1]
+		} else if p.it.valueBuf[len(p.it.valueBuf)-1] == 'u' {
+			p.it.fieldType = Unsigned
+			p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1]
+		} else {
+			p.it.fieldType = Float
+		}
+		return true
+	}
+
+	// to keep the same behavior that currently exists, default to boolean
+	p.it.fieldType = Boolean
+	return true
+}
+
+// FieldKey returns the key of the current field.
+func (p *point) FieldKey() []byte {
+	return p.it.key
+}
+
+// Type returns the FieldType of the current field.
+func (p *point) Type() FieldType {
+	return p.it.fieldType
+}
+
+// StringValue returns the string value of the current field.
+func (p *point) StringValue() string {
+	return unescapeStringField(string(p.it.valueBuf[1 : len(p.it.valueBuf)-1]))
+}
+
+// IntegerValue returns the integer value of the current field.
+func (p *point) IntegerValue() (int64, error) {
+	n, err := parseIntBytes(p.it.valueBuf, 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("unable to parse integer value %q: %v", p.it.valueBuf, err)
+	}
+	return n, nil
+}
+
+// UnsignedValue returns the unsigned value of the current field.
+func (p *point) UnsignedValue() (uint64, error) {
+	n, err := parseUintBytes(p.it.valueBuf, 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("unable to parse unsigned value %q: %v", p.it.valueBuf, err)
+	}
+	return n, nil
+}
+
+// BooleanValue returns the boolean value of the current field.
+func (p *point) BooleanValue() (bool, error) {
+	b, err := parseBoolBytes(p.it.valueBuf)
+	if err != nil {
+		return false, fmt.Errorf("unable to parse bool value %q: %v", p.it.valueBuf, err)
+	}
+	return b, nil
+}
+
+// FloatValue returns the float value of the current field.
+func (p *point) FloatValue() (float64, error) {
+	f, err := parseFloatBytes(p.it.valueBuf, 64)
+	if err != nil {
+		return 0, fmt.Errorf("unable to parse floating point value %q: %v", p.it.valueBuf, err)
+	}
+	return f, nil
+}
+
+// Reset resets the iterator to its initial state.
+func (p *point) Reset() { + p.it.fieldType = Empty + p.it.key = nil + p.it.valueBuf = nil + p.it.start = 0 + p.it.end = 0 +} + +// MarshalBinary encodes all the fields to their proper type and returns the binary +// representation +// NOTE: uint64 is specifically not supported due to potential overflow when we decode +// again later to an int64 +// NOTE2: uint is accepted, and may be 64 bits, and is for some reason accepted... +func (p Fields) MarshalBinary() []byte { + var b []byte + keys := make([]string, 0, len(p)) + + for k := range p { + keys = append(keys, k) + } + + // Not really necessary, can probably be removed. + sort.Strings(keys) + + for i, k := range keys { + if i > 0 { + b = append(b, ',') + } + b = appendField(b, k, p[k]) + } + + return b +} + +func appendField(b []byte, k string, v interface{}) []byte { + b = append(b, []byte(escape.String(k))...) + b = append(b, '=') + + // check popular types first + switch v := v.(type) { + case float64: + b = strconv.AppendFloat(b, v, 'f', -1, 64) + case int64: + b = strconv.AppendInt(b, v, 10) + b = append(b, 'i') + case string: + b = append(b, '"') + b = append(b, []byte(EscapeStringField(v))...) + b = append(b, '"') + case bool: + b = strconv.AppendBool(b, v) + case int32: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case int16: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case int8: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case int: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case uint64: + b = strconv.AppendUint(b, v, 10) + b = append(b, 'u') + case uint32: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case uint16: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case uint8: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case uint: + // TODO: 'uint' should be converted to writing as an unsigned integer, + // but we cannot since that would break backwards compatibility. + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case float32: + b = strconv.AppendFloat(b, float64(v), 'f', -1, 32) + case []byte: + b = append(b, v...) + case nil: + // skip + default: + // Can't determine the type, so convert to string + b = append(b, '"') + b = append(b, []byte(EscapeStringField(fmt.Sprintf("%v", v)))...) + b = append(b, '"') + + } + + return b +} + +// ValidKeyToken returns true if the token used for measurement, tag key, or tag +// value is a valid unicode string and only contains printable, non-replacement characters. +func ValidKeyToken(s string) bool { + if !utf8.ValidString(s) { + return false + } + for _, r := range s { + if !unicode.IsPrint(r) || r == unicode.ReplacementChar { + return false + } + } + return true +} + +// ValidKeyTokens returns true if the measurement name and all tags are valid. +func ValidKeyTokens(name string, tags Tags) bool { + if !ValidKeyToken(name) { + return false + } + for _, tag := range tags { + if !ValidKeyToken(string(tag.Key)) || !ValidKeyToken(string(tag.Value)) { + return false + } + } + return true +} diff --git a/vendor/github.com/influxdata/influxdb/models/rows.go b/vendor/github.com/influxdata/influxdb/models/rows.go new file mode 100644 index 000000000..c087a4882 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/rows.go @@ -0,0 +1,62 @@ +package models + +import ( + "sort" +) + +// Row represents a single row returned from the execution of a statement. 
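+//
+// For example (illustrative), a Row may marshal to JSON as:
+//
+//	{"name":"cpu","tags":{"host":"a"},"columns":["time","value"],"values":[[1609459200,0.5]]}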
+type Row struct {
+	Name    string            `json:"name,omitempty"`
+	Tags    map[string]string `json:"tags,omitempty"`
+	Columns []string          `json:"columns,omitempty"`
+	Values  [][]interface{}   `json:"values,omitempty"`
+	Partial bool              `json:"partial,omitempty"`
+}
+
+// SameSeries returns true if r contains values for the same series as o.
+func (r *Row) SameSeries(o *Row) bool {
+	return r.tagsHash() == o.tagsHash() && r.Name == o.Name
+}
+
+// tagsHash returns a hash of tag key/value pairs.
+func (r *Row) tagsHash() uint64 {
+	h := NewInlineFNV64a()
+	keys := r.tagsKeys()
+	for _, k := range keys {
+		h.Write([]byte(k))
+		h.Write([]byte(r.Tags[k]))
+	}
+	return h.Sum64()
+}
+
+// tagsKeys returns a sorted list of tag keys.
+func (r *Row) tagsKeys() []string {
+	a := make([]string, 0, len(r.Tags))
+	for k := range r.Tags {
+		a = append(a, k)
+	}
+	sort.Strings(a)
+	return a
+}
+
+// Rows represents a collection of rows. Rows implements sort.Interface.
+type Rows []*Row
+
+// Len implements sort.Interface.
+func (p Rows) Len() int { return len(p) }
+
+// Less implements sort.Interface.
+func (p Rows) Less(i, j int) bool {
+	// Sort by name first.
+	if p[i].Name != p[j].Name {
+		return p[i].Name < p[j].Name
+	}
+
+	// Sort by tag set hash. Tags don't have a meaningful sort order so we
+	// just compute a hash and sort by that instead. This allows the tests
+	// to receive rows in a predictable order every time.
+	return p[i].tagsHash() < p[j].tagsHash()
+}
+
+// Swap implements sort.Interface.
+func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/influxdata/influxdb/models/statistic.go b/vendor/github.com/influxdata/influxdb/models/statistic.go
new file mode 100644
index 000000000..553e9d09f
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/statistic.go
@@ -0,0 +1,42 @@
+package models
+
+// Statistic is the representation of a statistic used by the monitoring service.
+type Statistic struct {
+	Name   string                 `json:"name"`
+	Tags   map[string]string      `json:"tags"`
+	Values map[string]interface{} `json:"values"`
+}
+
+// NewStatistic returns an initialized Statistic.
+func NewStatistic(name string) Statistic {
+	return Statistic{
+		Name:   name,
+		Tags:   make(map[string]string),
+		Values: make(map[string]interface{}),
+	}
+}
+
+// StatisticTags is a map that can be merged with others without causing
+// mutations to either map.
+type StatisticTags map[string]string
+
+// Merge creates a new map containing the merged contents of tags and t.
+// If both tags and the receiver map contain the same key, the value in tags
+// is used in the resulting map.
+//
+// Merge always returns a usable map.
+func (t StatisticTags) Merge(tags map[string]string) map[string]string {
+	// Add everything in tags to the result.
+	out := make(map[string]string, len(tags))
+	for k, v := range tags {
+		out[k] = v
+	}
+
+	// Only add values from t that don't appear in tags.
+	for k, v := range t {
+		if _, ok := tags[k]; !ok {
+			out[k] = v
+		}
+	}
+	return out
+}
diff --git a/vendor/github.com/influxdata/influxdb/models/tagkeysset.go b/vendor/github.com/influxdata/influxdb/models/tagkeysset.go
new file mode 100644
index 000000000..d165bdce3
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/tagkeysset.go
@@ -0,0 +1,156 @@
+package models
+
+import (
+	"bytes"
+	"strings"
+)
+
+// TagKeysSet provides set operations for combining Tags.
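+//
+// A minimal usage sketch (illustrative, not upstream code):
+//
+//	var set TagKeysSet
+//	set.UnionKeys(NewTags(map[string]string{"host": "a"}))
+//	set.UnionKeys(NewTags(map[string]string{"region": "b"}))
+//	// set.Keys() is now []string{"host", "region"}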
+type TagKeysSet struct { + i int + keys [2][][]byte + tmp [][]byte +} + +// Clear removes all the elements of TagKeysSet and ensures all internal +// buffers are reset. +func (set *TagKeysSet) Clear() { + set.clear(set.keys[0]) + set.clear(set.keys[1]) + set.clear(set.tmp) + set.i = 0 + set.keys[0] = set.keys[0][:0] +} + +func (set *TagKeysSet) clear(b [][]byte) { + b = b[:cap(b)] + for i := range b { + b[i] = nil + } +} + +// KeysBytes returns the merged keys in lexicographical order. +// The slice is valid until the next call to UnionKeys, UnionBytes or Reset. +func (set *TagKeysSet) KeysBytes() [][]byte { + return set.keys[set.i&1] +} + +// Keys returns a copy of the merged keys in lexicographical order. +func (set *TagKeysSet) Keys() []string { + keys := set.KeysBytes() + s := make([]string, 0, len(keys)) + for i := range keys { + s = append(s, string(keys[i])) + } + return s +} + +func (set *TagKeysSet) String() string { + var s []string + for _, k := range set.KeysBytes() { + s = append(s, string(k)) + } + return strings.Join(s, ",") +} + +// IsSupersetKeys returns true if the TagKeysSet is a superset of all the keys +// contained in other. +func (set *TagKeysSet) IsSupersetKeys(other Tags) bool { + keys := set.keys[set.i&1] + i, j := 0, 0 + for i < len(keys) && j < len(other) { + if cmp := bytes.Compare(keys[i], other[j].Key); cmp > 0 { + return false + } else if cmp == 0 { + j++ + } + i++ + } + + return j == len(other) +} + +// IsSupersetBytes returns true if the TagKeysSet is a superset of all the keys +// in other. +// Other must be lexicographically sorted or the results are undefined. +func (set *TagKeysSet) IsSupersetBytes(other [][]byte) bool { + keys := set.keys[set.i&1] + i, j := 0, 0 + for i < len(keys) && j < len(other) { + if cmp := bytes.Compare(keys[i], other[j]); cmp > 0 { + return false + } else if cmp == 0 { + j++ + } + i++ + } + + return j == len(other) +} + +// UnionKeys updates the set so that it is the union of itself and all the +// keys contained in other. +func (set *TagKeysSet) UnionKeys(other Tags) { + if set.IsSupersetKeys(other) { + return + } + + if l := len(other); cap(set.tmp) < l { + set.tmp = make([][]byte, l) + } else { + set.tmp = set.tmp[:l] + } + + for i := range other { + set.tmp[i] = other[i].Key + } + + set.merge(set.tmp) +} + +// UnionBytes updates the set so that it is the union of itself and all the +// keys contained in other. +// Other must be lexicographically sorted or the results are undefined. +func (set *TagKeysSet) UnionBytes(other [][]byte) { + if set.IsSupersetBytes(other) { + return + } + + set.merge(other) +} + +func (set *TagKeysSet) merge(in [][]byte) { + keys := set.keys[set.i&1] + l := len(keys) + len(in) + set.i = (set.i + 1) & 1 + keya := set.keys[set.i&1] + if cap(keya) < l { + keya = make([][]byte, 0, l) + } else { + keya = keya[:0] + } + + i, j := 0, 0 + for i < len(keys) && j < len(in) { + ki, kj := keys[i], in[j] + if cmp := bytes.Compare(ki, kj); cmp < 0 { + i++ + } else if cmp > 0 { + ki = kj + j++ + } else { + i++ + j++ + } + + keya = append(keya, ki) + } + + if i < len(keys) { + keya = append(keya, keys[i:]...) + } else if j < len(in) { + keya = append(keya, in[j:]...) 
+ } + + set.keys[set.i&1] = keya +} diff --git a/vendor/github.com/influxdata/influxdb/models/time.go b/vendor/github.com/influxdata/influxdb/models/time.go new file mode 100644 index 000000000..297892c6d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/time.go @@ -0,0 +1,74 @@ +package models + +// Helper time methods since parsing time can easily overflow and we only support a +// specific time range. + +import ( + "fmt" + "math" + "time" +) + +const ( + // MinNanoTime is the minimum time that can be represented. + // + // 1677-09-21 00:12:43.145224194 +0000 UTC + // + // The two lowest minimum integers are used as sentinel values. The + // minimum value needs to be used as a value lower than any other value for + // comparisons and another separate value is needed to act as a sentinel + // default value that is unusable by the user, but usable internally. + // Because these two values need to be used for a special purpose, we do + // not allow users to write points at these two times. + MinNanoTime = int64(math.MinInt64) + 2 + + // MaxNanoTime is the maximum time that can be represented. + // + // 2262-04-11 23:47:16.854775806 +0000 UTC + // + // The highest time represented by a nanosecond needs to be used for an + // exclusive range in the shard group, so the maximum time needs to be one + // less than the possible maximum number of nanoseconds representable by an + // int64 so that we don't lose a point at that one time. + MaxNanoTime = int64(math.MaxInt64) - 1 +) + +var ( + minNanoTime = time.Unix(0, MinNanoTime).UTC() + maxNanoTime = time.Unix(0, MaxNanoTime).UTC() + + // ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch. + ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime) +) + +// SafeCalcTime safely calculates the time given. Will return error if the time is outside the +// supported range. +func SafeCalcTime(timestamp int64, precision string) (time.Time, error) { + mult := GetPrecisionMultiplier(precision) + if t, ok := safeSignedMult(timestamp, mult); ok { + tme := time.Unix(0, t).UTC() + return tme, CheckTime(tme) + } + + return time.Time{}, ErrTimeOutOfRange +} + +// CheckTime checks that a time is within the safe range. +func CheckTime(t time.Time) error { + if t.Before(minNanoTime) || t.After(maxNanoTime) { + return ErrTimeOutOfRange + } + return nil +} + +// Perform the multiplication and check to make sure it didn't overflow. +func safeSignedMult(a, b int64) (int64, bool) { + if a == 0 || b == 0 || a == 1 || b == 1 { + return a * b, true + } + if a == MinNanoTime || b == MaxNanoTime { + return 0, false + } + c := a * b + return c, c/b == a +} diff --git a/vendor/github.com/influxdata/influxdb/models/uint_support.go b/vendor/github.com/influxdata/influxdb/models/uint_support.go new file mode 100644 index 000000000..18d1ca06e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/uint_support.go @@ -0,0 +1,7 @@ +// +build uint uint64 + +package models + +func init() { + EnableUintSupport() +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go new file mode 100644 index 000000000..f3b31f42d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go @@ -0,0 +1,115 @@ +// Package escape contains utilities for escaping parts of InfluxQL +// and InfluxDB line protocol. 
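
The `safeSignedMult` helper above is the classic overflow check for signed multiplication: compute `c := a * b`, then verify `c/b == a`, which fails exactly when the product wrapped around int64. A small sketch of how `SafeCalcTime` behaves at the boundaries (assuming the package's usual precision multipliers, where `"s"` scales by 1e9 nanoseconds):

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	// 2021-01-01T00:00:00Z given in seconds; multiplied by 1e9 to nanoseconds.
	t, err := models.SafeCalcTime(1609459200, "s")
	fmt.Println(t, err) // 2021-01-01 00:00:00 +0000 UTC <nil>

	// A timestamp whose nanosecond value would overflow int64 is rejected
	// instead of silently wrapping around.
	_, err = models.SafeCalcTime(1<<62, "s")
	fmt.Println(err) // time outside range ...
}
```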
+package escape // import "github.com/influxdata/influxdb/pkg/escape" + +import ( + "bytes" + "strings" +) + +// Codes is a map of bytes to be escaped. +var Codes = map[byte][]byte{ + ',': []byte(`\,`), + '"': []byte(`\"`), + ' ': []byte(`\ `), + '=': []byte(`\=`), +} + +// Bytes escapes characters on the input slice, as defined by Codes. +func Bytes(in []byte) []byte { + for b, esc := range Codes { + in = bytes.Replace(in, []byte{b}, esc, -1) + } + return in +} + +const escapeChars = `," =` + +// IsEscaped returns whether b has any escaped characters, +// i.e. whether b seems to have been processed by Bytes. +func IsEscaped(b []byte) bool { + for len(b) > 0 { + i := bytes.IndexByte(b, '\\') + if i < 0 { + return false + } + + if i+1 < len(b) && strings.IndexByte(escapeChars, b[i+1]) >= 0 { + return true + } + b = b[i+1:] + } + return false +} + +// AppendUnescaped appends the unescaped version of src to dst +// and returns the resulting slice. +func AppendUnescaped(dst, src []byte) []byte { + var pos int + for len(src) > 0 { + next := bytes.IndexByte(src[pos:], '\\') + if next < 0 || pos+next+1 >= len(src) { + return append(dst, src...) + } + + if pos+next+1 < len(src) && strings.IndexByte(escapeChars, src[pos+next+1]) >= 0 { + if pos+next > 0 { + dst = append(dst, src[:pos+next]...) + } + src = src[pos+next+1:] + pos = 0 + } else { + pos += next + 1 + } + } + + return dst +} + +// Unescape returns a new slice containing the unescaped version of in. +func Unescape(in []byte) []byte { + if len(in) == 0 { + return nil + } + + if bytes.IndexByte(in, '\\') == -1 { + return in + } + + i := 0 + inLen := len(in) + + // The output size will be no more than inLen. Preallocating the + // capacity of the output is faster and uses less memory than + // letting append() do its own (over)allocation. + out := make([]byte, 0, inLen) + + for { + if i >= inLen { + break + } + if in[i] == '\\' && i+1 < inLen { + switch in[i+1] { + case ',': + out = append(out, ',') + i += 2 + continue + case '"': + out = append(out, '"') + i += 2 + continue + case ' ': + out = append(out, ' ') + i += 2 + continue + case '=': + out = append(out, '=') + i += 2 + continue + } + } + out = append(out, in[i]) + i += 1 + } + return out +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go b/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go new file mode 100644 index 000000000..db98033b0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go @@ -0,0 +1,21 @@ +package escape + +import "strings" + +var ( + escaper = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`) + unescaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`) +) + +// UnescapeString returns unescaped version of in. +func UnescapeString(in string) string { + if strings.IndexByte(in, '\\') == -1 { + return in + } + return unescaper.Replace(in) +} + +// String returns the escaped version of in. 
+func String(in string) string { + return escaper.Replace(in) +} diff --git a/vendor/github.com/klauspost/compress/flate/gen_inflate.go b/vendor/github.com/klauspost/compress/flate/gen_inflate.go deleted file mode 100644 index 35fc072a3..000000000 --- a/vendor/github.com/klauspost/compress/flate/gen_inflate.go +++ /dev/null @@ -1,294 +0,0 @@ -// +build generate - -//go:generate go run $GOFILE && gofmt -w inflate_gen.go - -package main - -import ( - "os" - "strings" -) - -func main() { - f, err := os.Create("inflate_gen.go") - if err != nil { - panic(err) - } - defer f.Close() - types := []string{"*bytes.Buffer", "*bytes.Reader", "*bufio.Reader", "*strings.Reader"} - names := []string{"BytesBuffer", "BytesReader", "BufioReader", "StringsReader"} - imports := []string{"bytes", "bufio", "io", "strings", "math/bits"} - f.WriteString(`// Code generated by go generate gen_inflate.go. DO NOT EDIT. - -package flate - -import ( -`) - - for _, imp := range imports { - f.WriteString("\t\"" + imp + "\"\n") - } - f.WriteString(")\n\n") - - template := ` - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) $FUNCNAME$() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.($TYPE$) - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
- nb, b := f.nb, f.b
- for {
- for nb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b = b
- f.nb = nb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
- }
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= nb {
- if n == 0 {
- f.b = b
- f.nb = nb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
- v = int(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- var length int
- switch {
- case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).$FUNCNAME$
- f.stepState = stateInit
- return
- }
- goto readLiteral
- case v == 256:
- f.finishBlock()
- return
- // otherwise, reference to older data
- case v < 265:
- length = v - (257 - 3)
- case v < maxNumLit:
- val := decCodeToLen[(v - 257)]
- length = int(val.length) + 3
- n := uint(val.extra)
- for f.nb < n {
- c, err := fr.ReadByte()
- if err != nil {
- if debugDecode {
- fmt.Println("morebits n>0:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
- }
- length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
- f.b >>= n & regSizeMaskUint32
- f.nb -= n
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- var dist uint32
- if f.hd == nil {
- for f.nb < 5 {
- c, err := fr.ReadByte()
- if err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb<5:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
- }
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
- } else {
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hd.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
- for {
- for nb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b = b
- f.nb = nb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
- }
- chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= nb {
- if n == 0 {
- f.b = b
- f.nb = nb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
- dist = uint32(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- switch {
- case dist < 4:
- dist++
- case dist < maxNumDist:
- nb := uint(dist-2) >> 1
- // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
- c, err := fr.ReadByte()
- if err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb<nb:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
- }
- extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
- f.b >>= nb & regSizeMaskUint32
- f.nb -= nb
- dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
- default:
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > uint32(f.dict.histSize()) {
- if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, int(dist)
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).$FUNCNAME$ // We need to continue this work
- f.stepState = stateDict
- return
- }
- goto readLiteral
- }
-}
-
-`
- for i, t := range types {
- s := strings.Replace(template, "$FUNCNAME$", "huffman"+names[i], -1)
- s = strings.Replace(s, "$TYPE$", t, -1)
- f.WriteString(s)
- }
- f.WriteString("func (f *decompressor) huffmanBlockDecoder() func() {\n")
- f.WriteString("\tswitch f.r.(type) {\n")
- for i, t := range types {
- f.WriteString("\t\tcase " + t + ":\n")
- f.WriteString("\t\t\treturn f.huffman" + names[i] + "\n")
- }
- f.WriteString("\t\tdefault:\n")
- f.WriteString("\t\t\treturn f.huffmanBlockGeneric")
- f.WriteString("\t}\n}\n")
-}
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
index e12da4db2..8b6e5c663 100644
--- a/vendor/github.com/klauspost/compress/huff0/README.md
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -14,7 +14,9 @@ but it can be used as a secondary step to compressors (like Snappy) that does no
 ## News
 
- * Mar 2018: First implementation released. Consider this beta software for now.
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
 
 # Usage
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index 62fd37324..1d41c25d2 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -5,7 +5,6 @@ package zstd
 import (
- "bytes"
 "errors"
 "io"
 "sync"
@@ -179,11 +178,13 @@ func (d *Decoder) Reset(r io.Reader) error {
 }
 // If bytes buffer and < 1MB, do sync decoding anyway.
- if bb, ok := r.(*bytes.Buffer); ok && bb.Len() < 1<<20 { + if bb, ok := r.(byter); ok && bb.Len() < 1<<20 { + var bb2 byter + bb2 = bb if debug { println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) } - b := bb.Bytes() + b := bb2.Bytes() var dst []byte if cap(d.current.b) > 0 { dst = d.current.b diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 0c761dd62..9056beef2 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -4,6 +4,7 @@ package zstd import ( + "bytes" "errors" "log" "math" @@ -146,3 +147,10 @@ func load64(b []byte, i int) uint64 { return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 } + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml new file mode 100644 index 000000000..7942c565c --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/.travis.yml @@ -0,0 +1,15 @@ +language: go +sudo: false +go: + - 1.13.x + - tip + +before_install: + - go get -t -v ./... + +script: + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) + diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE new file mode 100644 index 000000000..91b5cef30 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md new file mode 100644 index 000000000..e055952b6 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -0,0 +1,48 @@ +# go-colorable + +[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable) +[![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable) +[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) + +Colorable writer for windows. 
+ +For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) +This package is possible to handle escape sequence for ansi color on windows. + +## Too Bad! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) + + +## So Good! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) + +## Usage + +```go +logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) +logrus.SetOutput(colorable.NewColorableStdout()) + +logrus.Info("succeeded") +logrus.Warn("not correct") +logrus.Error("something error") +logrus.Fatal("panic") +``` + +You can compile above code on non-windows OSs. + +## Installation + +``` +$ go get github.com/mattn/go-colorable +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go new file mode 100644 index 000000000..1f7806fe1 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -0,0 +1,37 @@ +// +build appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} + +// EnableColorsStdout enable colors if possible. +func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go new file mode 100644 index 000000000..08cbd1e0f --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -0,0 +1,38 @@ +// +build !windows +// +build !appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} + +// EnableColorsStdout enable colors if possible. 
+func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go new file mode 100644 index 000000000..41215d7fc --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -0,0 +1,1043 @@ +// +build windows +// +build !appengine + +package colorable + +import ( + "bytes" + "io" + "math" + "os" + "strconv" + "strings" + "sync" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) + commonLvbUnderscore = 0x8000 + + cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 +) + +const ( + genericRead = 0x80000000 + genericWrite = 0x40000000 +) + +const ( + consoleTextmodeBuffer = 0x1 +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") + procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") +) + +// Writer provides colorable Writer to the console +type Writer struct { + out io.Writer + handle syscall.Handle + althandle syscall.Handle + oldattr word + oldpos coord + rest bytes.Buffer + mutex sync.Mutex +} + +// NewColorable returns new instance of Writer which handles escape sequence from File. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var mode uint32 + if r, _, _ := procGetConsoleMode.Call(file.Fd(), uintptr(unsafe.Pointer(&mode))); r != 0 && mode&cENABLE_VIRTUAL_TERMINAL_PROCESSING != 0 { + return file + } + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + } + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. 
+func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 
199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +// `\033]0;TITLESTR\007` +func doTitleSequence(er *bytes.Reader) error { + var c byte + var err error + + c, err = er.ReadByte() + if err != nil { + return err + } + if c != '0' && c != '2' { + return nil + } + c, err = er.ReadByte() + if err != nil { + return err + } + if c != ';' { + return nil + } + title := make([]byte, 0, 80) + for { + c, err = er.ReadByte() + if err != nil { + return err + } + if c == 0x07 || c == '\n' { + break + } + title = append(title, c) + } + if len(title) > 0 { + title8, err := syscall.UTF16PtrFromString(string(title)) + if err == nil { + procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) + } + } + return nil +} + +// returns Atoi(s) unless s == "" in which case it returns def +func atoiWithDefault(s string, def int) (int, error) { + if s == "" { + return def, nil + } + return strconv.Atoi(s) +} + +// Write writes data on console +func (w *Writer) Write(data []byte) (n int, err error) { + w.mutex.Lock() + defer w.mutex.Unlock() + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + handle := w.handle + + var er *bytes.Reader + if w.rest.Len() > 0 { + var rest bytes.Buffer + w.rest.WriteTo(&rest) + w.rest.Reset() + rest.Write(data) + er = bytes.NewReader(rest.Bytes()) + } else { + er = bytes.NewReader(data) + } + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + + switch c2 { + case '>': + continue + case ']': + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { + break loop + } + er = bytes.NewReader(w.rest.Bytes()[2:]) + err := doTitleSequence(er) + if err != nil { + break loop + } + w.rest.Reset() + continue + // https://github.com/mattn/go-colorable/issues/27 + case '7': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + continue + case '8': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + continue + case 0x5b: + // execute part after switch + default: + continue + } + + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + + var buf bytes.Buffer + var m byte + for i, c := range w.rest.Bytes()[2:] { + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) + w.rest.Reset() + break + } + 
buf.Write([]byte(string(c))) + } + if m == 0 { + break loop + } + + switch m { + case 'A': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + if csbi.cursorPosition.x < 0 { + csbi.cursorPosition.x = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n < 1 { + n = 1 + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n - 1) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H', 'f': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + var count, written dword + var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + 
dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var count, written dword + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'X': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var written dword + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i++ { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case n == 4: + attr |= commonLvbUnderscore + case (1 <= n && n <= 3) || n == 5: + attr |= foregroundIntensity + case n == 7 || n == 27: + attr = + (attr &^ (foregroundMask | backgroundMask)) | + ((attr & foregroundMask) << 4) | + ((attr & backgroundMask) >> 4) + case n == 22: + attr &^= foregroundIntensity + case n == 24: + attr &^= commonLvbUnderscore + case 30 <= n && n <= 37: + attr &= backgroundMask + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. 
+ if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256%len(n256foreAttr)] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= foregroundRed + } + if g > 127 { + attr |= foregroundGreen + } + if b > 127 { + attr |= foregroundBlue + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr &= foregroundMask + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256%len(n256backAttr)] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= backgroundRed + } + if g > 127 { + attr |= backgroundGreen + } + if b > 127 { + attr |= backgroundBlue + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) + } + } + case 'h': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle == 0 { + h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) + w.althandle = syscall.Handle(h) + if w.althandle != 0 { + handle = w.althandle + } + } + } + case 'l': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle != 0 { + syscall.CloseHandle(w.althandle) + w.althandle = 0 + handle = w.handle 
+ } + } + case 's': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + case 'u': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + } + } + + return len(data), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + {0x000000, false, false, false, false}, + {0x000080, false, false, true, false}, + {0x008000, false, true, false, false}, + {0x008080, false, true, true, false}, + {0x800000, true, false, false, false}, + {0x800080, true, false, true, false}, + {0x808000, true, true, false, false}, + {0xc0c0c0, true, true, true, false}, + {0x808080, false, false, false, true}, + {0x0000ff, false, false, true, true}, + {0x00ff00, false, true, false, true}, + {0x00ffff, false, true, true, true}, + {0xff0000, true, false, false, true}, + {0xff00ff, true, false, true, true}, + {0xffff00, true, true, false, true}, + {0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} + +// EnableColorsStdout enable colors if possible. 
+func EnableColorsStdout(enabled *bool) func() { + var mode uint32 + h := os.Stdout.Fd() + if r, _, _ := procGetConsoleMode.Call(h, uintptr(unsafe.Pointer(&mode))); r != 0 { + if r, _, _ = procSetConsoleMode.Call(h, uintptr(mode|cENABLE_VIRTUAL_TERMINAL_PROCESSING)); r != 0 { + if enabled != nil { + *enabled = true + } + return func() { + procSetConsoleMode.Call(h, uintptr(mode)) + } + } + } + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/go.mod b/vendor/github.com/mattn/go-colorable/go.mod new file mode 100644 index 000000000..1e590b819 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.mod @@ -0,0 +1,8 @@ +module github.com/mattn/go-colorable + +require ( + github.com/mattn/go-isatty v0.0.12 + golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae // indirect +) + +go 1.13 diff --git a/vendor/github.com/mattn/go-colorable/go.sum b/vendor/github.com/mattn/go-colorable/go.sum new file mode 100644 index 000000000..cf5b95d97 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.sum @@ -0,0 +1,5 @@ +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/mattn/go-colorable/go.test.sh b/vendor/github.com/mattn/go-colorable/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go new file mode 100644 index 000000000..95f2c6be2 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -0,0 +1,55 @@ +package colorable + +import ( + "bytes" + "io" +) + +// NonColorable holds writer but removes escape sequence. +type NonColorable struct { + out io.Writer +} + +// NewNonColorable returns new instance of Writer which removes escape sequence from Writer. 
+func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +// Write writes data on console +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewReader(data) + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + if c2 != 0x5b { + continue + } + + var buf bytes.Buffer + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + + return len(data), nil +} diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml new file mode 100644 index 000000000..604314dd4 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/.travis.yml @@ -0,0 +1,14 @@ +language: go +sudo: false +go: + - 1.13.x + - tip + +before_install: + - go get -t -v ./... + +script: + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 000000000..65dc692b6 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
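
`NonColorable`, added just above, is the inverse convenience of the Windows writer: rather than translating ANSI escape sequences into console API calls, it silently drops them, which is useful when the same colored output is also sent to a plain log file. A quick sketch of the behavior, traced from the `Write` implementation above:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/mattn/go-colorable"
)

func main() {
	var buf bytes.Buffer
	w := colorable.NewNonColorable(&buf)

	// CSI color sequences are consumed and discarded; plain text passes through.
	fmt.Fprint(w, "\x1b[31mred\x1b[0m text")
	fmt.Println(buf.String()) // prints: red text
}
```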
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 000000000..38418353e --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,50 @@ +# go-isatty + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) +[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { + fmt.Println("Is Cygwin/MSYS2 Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +## License + +MIT + +## Author + +Yasuhiro Matsumoto (a.k.a mattn) + +## Thanks + +* k-takata: base idea for IsCygwinTerminal + + https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 000000000..17d4f90eb --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod new file mode 100644 index 000000000..605c4c221 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.mod @@ -0,0 +1,5 @@ +module github.com/mattn/go-isatty + +go 1.12 + +require golang.org/x/sys v0.0.0-20200116001909-b77594299b42 diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum new file mode 100644 index 000000000..912e29cbc --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 000000000..711f28808 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,18 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. 
This is also always false on this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+	return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go
new file mode 100644
index 000000000..ff714a376
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_others.go
@@ -0,0 +1,15 @@
+// +build appengine js nacl
+
+package isatty
+
+// IsTerminal returns true if the file descriptor is a terminal. This is
+// always false on js and appengine classic, which is a sandboxed PaaS.
+func IsTerminal(fd uintptr) bool {
+	return false
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal. This is also always false on this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+	return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go
new file mode 100644
index 000000000..c5b6e0c08
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go
@@ -0,0 +1,22 @@
+// +build plan9
+
+package isatty
+
+import (
+	"syscall"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	path, err := syscall.Fd2path(int(fd))
+	if err != nil {
+		return false
+	}
+	return path == "/dev/cons" || path == "/mnt/term/dev/cons"
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal. This is also always false on this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+	return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
new file mode 100644
index 000000000..bdd5c79a0
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go
@@ -0,0 +1,22 @@
+// +build solaris
+// +build !appengine
+
+package isatty
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
+func IsTerminal(fd uintptr) bool {
+	var termio unix.Termio
+	err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
+	return err == nil
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal. This is also always false on this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+	return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
new file mode 100644
index 000000000..31a1ca973
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
@@ -0,0 +1,18 @@
+// +build linux aix
+// +build !appengine
+
+package isatty
+
+import "golang.org/x/sys/unix"
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	_, err := unix.IoctlGetTermios(int(fd), unix.TCGETS)
+	return err == nil
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal. This is also always false on this environment.
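+// (Cygwin/MSYS2 ptys only exist on Windows; see isatty_windows.go for the
+// name-based pipe check used there.)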
+func IsCygwinTerminal(fd uintptr) bool {
+	return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go
new file mode 100644
index 000000000..1fa869154
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go
@@ -0,0 +1,125 @@
+// +build windows
+// +build !appengine
+
+package isatty
+
+import (
+	"errors"
+	"strings"
+	"syscall"
+	"unicode/utf16"
+	"unsafe"
+)
+
+const (
+	objectNameInfo uintptr = 1
+	fileNameInfo           = 2
+	fileTypePipe           = 3
+)
+
+var (
+	kernel32                         = syscall.NewLazyDLL("kernel32.dll")
+	ntdll                            = syscall.NewLazyDLL("ntdll.dll")
+	procGetConsoleMode               = kernel32.NewProc("GetConsoleMode")
+	procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
+	procGetFileType                  = kernel32.NewProc("GetFileType")
+	procNtQueryObject                = ntdll.NewProc("NtQueryObject")
+)
+
+func init() {
+	// Check if GetFileInformationByHandleEx is available.
+	if procGetFileInformationByHandleEx.Find() != nil {
+		procGetFileInformationByHandleEx = nil
+	}
+}
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	var st uint32
+	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+	return r != 0 && e == 0
+}
+
+// isCygwinPipeName reports whether the pipe name is one used for a cygwin/msys2 pty.
+// Cygwin/MSYS2 PTY has a name like:
+//   \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
+func isCygwinPipeName(name string) bool {
+	token := strings.Split(name, "-")
+	if len(token) < 5 {
+		return false
+	}
+
+	if token[0] != `\msys` &&
+		token[0] != `\cygwin` &&
+		token[0] != `\Device\NamedPipe\msys` &&
+		token[0] != `\Device\NamedPipe\cygwin` {
+		return false
+	}
+
+	if token[1] == "" {
+		return false
+	}
+
+	if !strings.HasPrefix(token[2], "pty") {
+		return false
+	}
+
+	if token[3] != `from` && token[3] != `to` {
+		return false
+	}
+
+	if token[4] != "master" {
+		return false
+	}
+
+	return true
+}
+
+// getFileNameByHandle uses the undocumented ntdll NtQueryObject call to get
+// the full file name for a file handle. GetFileInformationByHandleEx is not
+// available before Windows Vista, so this is a workaround for older systems
+// such as Windows XP; it also works on systems from Windows Vista to 10.
+// see https://stackoverflow.com/a/18792477 for details
+func getFileNameByHandle(fd uintptr) (string, error) {
+	if procNtQueryObject == nil {
+		return "", errors.New("ntdll.dll: NtQueryObject not supported")
+	}
+
+	var buf [4 + syscall.MAX_PATH]uint16
+	var result int
+	r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5,
+		fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0)
+	if r != 0 {
+		return "", e
+	}
+	return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal.
+func IsCygwinTerminal(fd uintptr) bool {
+	if procGetFileInformationByHandleEx == nil {
+		name, err := getFileNameByHandle(fd)
+		if err != nil {
+			return false
+		}
+		return isCygwinPipeName(name)
+	}
+
+	// Cygwin/msys's pty is a pipe.
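+	// First check that the handle is a pipe at all (GetFileType must report
+	// FILE_TYPE_PIPE); only then is the pipe name fetched via
+	// GetFileInformationByHandleEx and matched against the
+	// \{cygwin,msys}-...-ptyN-{from,to}-master pattern above.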
+ ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/vendor/github.com/mattn/go-isatty/renovate.json b/vendor/github.com/mattn/go-isatty/renovate.json new file mode 100644 index 000000000..5ae9d96b7 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/renovate.json @@ -0,0 +1,8 @@ +{ + "extends": [ + "config:base" + ], + "postUpdateOptions": [ + "gomodTidy" + ] +} diff --git a/vendor/github.com/mattn/go-runewidth/.travis.yml b/vendor/github.com/mattn/go-runewidth/.travis.yml new file mode 100644 index 000000000..6a21813a3 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/.travis.yml @@ -0,0 +1,16 @@ +language: go +sudo: false +go: + - 1.13.x + - tip + +before_install: + - go get -t -v ./... + +script: + - go generate + - git diff --cached --exit-code + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-runewidth/LICENSE b/vendor/github.com/mattn/go-runewidth/LICENSE new file mode 100644 index 000000000..91b5cef30 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-runewidth/README.md b/vendor/github.com/mattn/go-runewidth/README.md new file mode 100644 index 000000000..aa56ab96c --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/README.md @@ -0,0 +1,27 @@ +go-runewidth +============ + +[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth) +[![Codecov](https://codecov.io/gh/mattn/go-runewidth/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-runewidth) +[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth) +[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth) + +Provides functions to get fixed width of the character or string. 
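+
+For example, a quick sketch (output assumes the default, non-CJK locale, so
+ambiguous-width runes such as `☆` count as one cell):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/mattn/go-runewidth"
+)
+
+func main() {
+	fmt.Println(runewidth.RuneWidth('A'))                     // 1 cell
+	fmt.Println(runewidth.RuneWidth('あ'))                    // 2 cells (East Asian wide)
+	fmt.Println(runewidth.StringWidth("Hello, 世界"))         // 11 = 7 + 2*2
+	fmt.Println(runewidth.Truncate("つのだ☆HIRO", 10, "...")) // "つのだ☆..." (10 cells)
+}
+```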
+ +Usage +----- + +```go +runewidth.StringWidth("つのだ☆HIRO") == 12 +``` + + +Author +------ + +Yasuhiro Matsumoto + +License +------- + +under the MIT License: http://mattn.mit-license.org/2013 diff --git a/vendor/github.com/mattn/go-runewidth/go.mod b/vendor/github.com/mattn/go-runewidth/go.mod new file mode 100644 index 000000000..8a9d524ec --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/go.mod @@ -0,0 +1,5 @@ +module github.com/mattn/go-runewidth + +go 1.9 + +require github.com/rivo/uniseg v0.1.0 diff --git a/vendor/github.com/mattn/go-runewidth/go.sum b/vendor/github.com/mattn/go-runewidth/go.sum new file mode 100644 index 000000000..02135660b --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/go.sum @@ -0,0 +1,2 @@ +github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= diff --git a/vendor/github.com/mattn/go-runewidth/go.test.sh b/vendor/github.com/mattn/go-runewidth/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go new file mode 100644 index 000000000..f3871a624 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth.go @@ -0,0 +1,241 @@ +package runewidth + +import ( + "os" + + "github.com/rivo/uniseg" +) + +//go:generate go run script/generate.go + +var ( + // EastAsianWidth will be set true if the current locale is CJK + EastAsianWidth bool + + // DefaultCondition is a condition in current locale + DefaultCondition = &Condition{} +) + +func init() { + handleEnv() +} + +func handleEnv() { + env := os.Getenv("RUNEWIDTH_EASTASIAN") + if env == "" { + EastAsianWidth = IsEastAsian() + } else { + EastAsianWidth = env == "1" + } + // update DefaultCondition + DefaultCondition.EastAsianWidth = EastAsianWidth +} + +type interval struct { + first rune + last rune +} + +type table []interval + +func inTables(r rune, ts ...table) bool { + for _, t := range ts { + if inTable(r, t) { + return true + } + } + return false +} + +func inTable(r rune, t table) bool { + if r < t[0].first { + return false + } + + bot := 0 + top := len(t) - 1 + for top >= bot { + mid := (bot + top) >> 1 + + switch { + case t[mid].last < r: + bot = mid + 1 + case t[mid].first > r: + top = mid - 1 + default: + return true + } + } + + return false +} + +var private = table{ + {0x00E000, 0x00F8FF}, {0x0F0000, 0x0FFFFD}, {0x100000, 0x10FFFD}, +} + +var nonprint = table{ + {0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD}, + {0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F}, + {0x2028, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF}, + {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF}, +} + +// Condition have flag EastAsianWidth whether the current locale is CJK or not. +type Condition struct { + EastAsianWidth bool +} + +// NewCondition return new instance of Condition which is current locale. +func NewCondition() *Condition { + return &Condition{ + EastAsianWidth: EastAsianWidth, + } +} + +// RuneWidth returns the number of cells in r. 
+// See http://www.unicode.org/reports/tr11/
+func (c *Condition) RuneWidth(r rune) int {
+	switch {
+	case r < 0 || r > 0x10FFFF || inTables(r, nonprint, combining, notassigned):
+		return 0
+	case (c.EastAsianWidth && IsAmbiguousWidth(r)) || inTables(r, doublewidth):
+		return 2
+	default:
+		return 1
+	}
+}
+
+// StringWidth returns the display width of s as it would appear on screen.
+func (c *Condition) StringWidth(s string) (width int) {
+	g := uniseg.NewGraphemes(s)
+	for g.Next() {
+		var chWidth int
+		for _, r := range g.Runes() {
+			chWidth = c.RuneWidth(r)
+			if chWidth > 0 {
+				break // Our best guess at this point is to use the width of the first non-zero-width rune.
+			}
+		}
+		width += chWidth
+	}
+	return
+}
+
+// Truncate returns s truncated so that it fits in w cells, appending tail.
+func (c *Condition) Truncate(s string, w int, tail string) string {
+	if c.StringWidth(s) <= w {
+		return s
+	}
+	w -= c.StringWidth(tail)
+	var width int
+	pos := len(s)
+	g := uniseg.NewGraphemes(s)
+	for g.Next() {
+		var chWidth int
+		for _, r := range g.Runes() {
+			chWidth = c.RuneWidth(r)
+			if chWidth > 0 {
+				break // See StringWidth() for details.
+			}
+		}
+		if width+chWidth > w {
+			pos, _ = g.Positions()
+			break
+		}
+		width += chWidth
+	}
+	return s[:pos] + tail
+}
+
+// Wrap returns s wrapped so that no line exceeds w cells.
+func (c *Condition) Wrap(s string, w int) string {
+	width := 0
+	out := ""
+	for _, r := range []rune(s) {
+		cw := c.RuneWidth(r)
+		if r == '\n' {
+			out += string(r)
+			width = 0
+			continue
+		} else if width+cw > w {
+			out += "\n"
+			width = 0
+			out += string(r)
+			width += cw
+			continue
+		}
+		out += string(r)
+		width += cw
+	}
+	return out
+}
+
+// FillLeft returns s left-padded with spaces to w cells.
+func (c *Condition) FillLeft(s string, w int) string {
+	width := c.StringWidth(s)
+	count := w - width
+	if count > 0 {
+		b := make([]byte, count)
+		for i := range b {
+			b[i] = ' '
+		}
+		return string(b) + s
+	}
+	return s
+}
+
+// FillRight returns s right-padded with spaces to w cells.
+func (c *Condition) FillRight(s string, w int) string {
+	width := c.StringWidth(s)
+	count := w - width
+	if count > 0 {
+		b := make([]byte, count)
+		for i := range b {
+			b[i] = ' '
+		}
+		return s + string(b)
+	}
+	return s
+}
+
+// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/
+func RuneWidth(r rune) int {
+	return DefaultCondition.RuneWidth(r)
+}
+
+// IsAmbiguousWidth returns whether r has ambiguous width or not.
+func IsAmbiguousWidth(r rune) bool {
+	return inTables(r, private, ambiguous)
+}
+
+// IsNeutralWidth returns whether r has neutral width or not.
+func IsNeutralWidth(r rune) bool {
+	return inTable(r, neutral)
+}
+
+// StringWidth returns the display width of s as it would appear on screen.
+func StringWidth(s string) (width int) {
+	return DefaultCondition.StringWidth(s)
+}
+
+// Truncate returns s truncated so that it fits in w cells, appending tail.
+func Truncate(s string, w int, tail string) string {
+	return DefaultCondition.Truncate(s, w, tail)
+}
+
+// Wrap returns s wrapped so that no line exceeds w cells.
+func Wrap(s string, w int) string {
+	return DefaultCondition.Wrap(s, w)
+}
+
+// FillLeft returns s left-padded with spaces to w cells.
+func FillLeft(s string, w int) string {
+	return DefaultCondition.FillLeft(s, w)
+}
+
+// FillRight returns s right-padded with spaces to w cells.
+func FillRight(s string, w int) string {
+	return DefaultCondition.FillRight(s, w)
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
new file mode 100644
index 000000000..7d99f6e52
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
@@ -0,0 +1,8 @@
+// +build appengine
+
+package runewidth
+
+// IsEastAsian returns true if the current locale is CJK
+func IsEastAsian() bool {
+	return false
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/github.com/mattn/go-runewidth/runewidth_js.go
new file mode 100644
index 000000000..c5fdf40ba
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_js.go
@@ -0,0 +1,9 @@
+// +build js
+// +build !appengine
+
+package runewidth
+
+func IsEastAsian() bool {
+	// TODO: Implement this for the web. Detect east asian in a compatible way, and return true.
+	return false
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
new file mode 100644
index 000000000..480ad7485
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
@@ -0,0 +1,82 @@
+// +build !windows
+// +build !js
+// +build !appengine
+
+package runewidth
+
+import (
+	"os"
+	"regexp"
+	"strings"
+)
+
+var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`)
+
+var mblenTable = map[string]int{
+	"utf-8":   6,
+	"utf8":    6,
+	"jis":     8,
+	"eucjp":   3,
+	"euckr":   2,
+	"euccn":   2,
+	"sjis":    2,
+	"cp932":   2,
+	"cp51932": 2,
+	"cp936":   2,
+	"cp949":   2,
+	"cp950":   2,
+	"big5":    2,
+	"gbk":     2,
+	"gb2312":  2,
+}
+
+func isEastAsian(locale string) bool {
+	charset := strings.ToLower(locale)
+	r := reLoc.FindStringSubmatch(locale)
+	if len(r) == 2 {
+		charset = strings.ToLower(r[1])
+	}
+
+	if strings.HasSuffix(charset, "@cjk_narrow") {
+		return false
+	}
+
+	for pos, b := range []byte(charset) {
+		if b == '@' {
+			charset = charset[:pos]
+			break
+		}
+	}
+	max := 1
+	if m, ok := mblenTable[charset]; ok {
+		max = m
+	}
+	if max > 1 && (charset[0] != 'u' ||
+		strings.HasPrefix(locale, "ja") ||
+		strings.HasPrefix(locale, "ko") ||
+		strings.HasPrefix(locale, "zh")) {
+		return true
+	}
+	return false
+}
+
+// IsEastAsian returns true if the current locale is CJK
+func IsEastAsian() bool {
+	locale := os.Getenv("LC_ALL")
+	if locale == "" {
+		locale = os.Getenv("LC_CTYPE")
+	}
+	if locale == "" {
+		locale = os.Getenv("LANG")
+	}
+
+	// ignore C locale
+	if locale == "POSIX" || locale == "C" {
+		return false
+	}
+	if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.'
|| locale[1] == '-') { + return false + } + + return isEastAsian(locale) +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_table.go b/vendor/github.com/mattn/go-runewidth/runewidth_table.go new file mode 100644 index 000000000..b27d77d89 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_table.go @@ -0,0 +1,437 @@ +// Code generated by script/generate.go. DO NOT EDIT. + +package runewidth + +var combining = table{ + {0x0300, 0x036F}, {0x0483, 0x0489}, {0x07EB, 0x07F3}, + {0x0C00, 0x0C00}, {0x0C04, 0x0C04}, {0x0D00, 0x0D01}, + {0x135D, 0x135F}, {0x1A7F, 0x1A7F}, {0x1AB0, 0x1AC0}, + {0x1B6B, 0x1B73}, {0x1DC0, 0x1DF9}, {0x1DFB, 0x1DFF}, + {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2DE0, 0x2DFF}, + {0x3099, 0x309A}, {0xA66F, 0xA672}, {0xA674, 0xA67D}, + {0xA69E, 0xA69F}, {0xA6F0, 0xA6F1}, {0xA8E0, 0xA8F1}, + {0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, {0x10376, 0x1037A}, + {0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x11300, 0x11301}, + {0x1133B, 0x1133C}, {0x11366, 0x1136C}, {0x11370, 0x11374}, + {0x16AF0, 0x16AF4}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172}, + {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD}, + {0x1D242, 0x1D244}, {0x1E000, 0x1E006}, {0x1E008, 0x1E018}, + {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, + {0x1E8D0, 0x1E8D6}, +} + +var doublewidth = table{ + {0x1100, 0x115F}, {0x231A, 0x231B}, {0x2329, 0x232A}, + {0x23E9, 0x23EC}, {0x23F0, 0x23F0}, {0x23F3, 0x23F3}, + {0x25FD, 0x25FE}, {0x2614, 0x2615}, {0x2648, 0x2653}, + {0x267F, 0x267F}, {0x2693, 0x2693}, {0x26A1, 0x26A1}, + {0x26AA, 0x26AB}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5}, + {0x26CE, 0x26CE}, {0x26D4, 0x26D4}, {0x26EA, 0x26EA}, + {0x26F2, 0x26F3}, {0x26F5, 0x26F5}, {0x26FA, 0x26FA}, + {0x26FD, 0x26FD}, {0x2705, 0x2705}, {0x270A, 0x270B}, + {0x2728, 0x2728}, {0x274C, 0x274C}, {0x274E, 0x274E}, + {0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797}, + {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C}, + {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99}, + {0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB}, + {0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF}, + {0x3105, 0x312F}, {0x3131, 0x318E}, {0x3190, 0x31E3}, + {0x31F0, 0x321E}, {0x3220, 0x3247}, {0x3250, 0x4DBF}, + {0x4E00, 0xA48C}, {0xA490, 0xA4C6}, {0xA960, 0xA97C}, + {0xAC00, 0xD7A3}, {0xF900, 0xFAFF}, {0xFE10, 0xFE19}, + {0xFE30, 0xFE52}, {0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, + {0xFF01, 0xFF60}, {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE4}, + {0x16FF0, 0x16FF1}, {0x17000, 0x187F7}, {0x18800, 0x18CD5}, + {0x18D00, 0x18D08}, {0x1B000, 0x1B11E}, {0x1B150, 0x1B152}, + {0x1B164, 0x1B167}, {0x1B170, 0x1B2FB}, {0x1F004, 0x1F004}, + {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A}, + {0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248}, + {0x1F250, 0x1F251}, {0x1F260, 0x1F265}, {0x1F300, 0x1F320}, + {0x1F32D, 0x1F335}, {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393}, + {0x1F3A0, 0x1F3CA}, {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0}, + {0x1F3F4, 0x1F3F4}, {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440}, + {0x1F442, 0x1F4FC}, {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E}, + {0x1F550, 0x1F567}, {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596}, + {0x1F5A4, 0x1F5A4}, {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5}, + {0x1F6CC, 0x1F6CC}, {0x1F6D0, 0x1F6D2}, {0x1F6D5, 0x1F6D7}, + {0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6FC}, {0x1F7E0, 0x1F7EB}, + {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1F978}, + {0x1F97A, 0x1F9CB}, {0x1F9CD, 0x1F9FF}, {0x1FA70, 0x1FA74}, + {0x1FA78, 0x1FA7A}, {0x1FA80, 0x1FA86}, {0x1FA90, 0x1FAA8}, + {0x1FAB0, 0x1FAB6}, {0x1FAC0, 0x1FAC2}, {0x1FAD0, 
0x1FAD6}, + {0x20000, 0x2FFFD}, {0x30000, 0x3FFFD}, +} + +var ambiguous = table{ + {0x00A1, 0x00A1}, {0x00A4, 0x00A4}, {0x00A7, 0x00A8}, + {0x00AA, 0x00AA}, {0x00AD, 0x00AE}, {0x00B0, 0x00B4}, + {0x00B6, 0x00BA}, {0x00BC, 0x00BF}, {0x00C6, 0x00C6}, + {0x00D0, 0x00D0}, {0x00D7, 0x00D8}, {0x00DE, 0x00E1}, + {0x00E6, 0x00E6}, {0x00E8, 0x00EA}, {0x00EC, 0x00ED}, + {0x00F0, 0x00F0}, {0x00F2, 0x00F3}, {0x00F7, 0x00FA}, + {0x00FC, 0x00FC}, {0x00FE, 0x00FE}, {0x0101, 0x0101}, + {0x0111, 0x0111}, {0x0113, 0x0113}, {0x011B, 0x011B}, + {0x0126, 0x0127}, {0x012B, 0x012B}, {0x0131, 0x0133}, + {0x0138, 0x0138}, {0x013F, 0x0142}, {0x0144, 0x0144}, + {0x0148, 0x014B}, {0x014D, 0x014D}, {0x0152, 0x0153}, + {0x0166, 0x0167}, {0x016B, 0x016B}, {0x01CE, 0x01CE}, + {0x01D0, 0x01D0}, {0x01D2, 0x01D2}, {0x01D4, 0x01D4}, + {0x01D6, 0x01D6}, {0x01D8, 0x01D8}, {0x01DA, 0x01DA}, + {0x01DC, 0x01DC}, {0x0251, 0x0251}, {0x0261, 0x0261}, + {0x02C4, 0x02C4}, {0x02C7, 0x02C7}, {0x02C9, 0x02CB}, + {0x02CD, 0x02CD}, {0x02D0, 0x02D0}, {0x02D8, 0x02DB}, + {0x02DD, 0x02DD}, {0x02DF, 0x02DF}, {0x0300, 0x036F}, + {0x0391, 0x03A1}, {0x03A3, 0x03A9}, {0x03B1, 0x03C1}, + {0x03C3, 0x03C9}, {0x0401, 0x0401}, {0x0410, 0x044F}, + {0x0451, 0x0451}, {0x2010, 0x2010}, {0x2013, 0x2016}, + {0x2018, 0x2019}, {0x201C, 0x201D}, {0x2020, 0x2022}, + {0x2024, 0x2027}, {0x2030, 0x2030}, {0x2032, 0x2033}, + {0x2035, 0x2035}, {0x203B, 0x203B}, {0x203E, 0x203E}, + {0x2074, 0x2074}, {0x207F, 0x207F}, {0x2081, 0x2084}, + {0x20AC, 0x20AC}, {0x2103, 0x2103}, {0x2105, 0x2105}, + {0x2109, 0x2109}, {0x2113, 0x2113}, {0x2116, 0x2116}, + {0x2121, 0x2122}, {0x2126, 0x2126}, {0x212B, 0x212B}, + {0x2153, 0x2154}, {0x215B, 0x215E}, {0x2160, 0x216B}, + {0x2170, 0x2179}, {0x2189, 0x2189}, {0x2190, 0x2199}, + {0x21B8, 0x21B9}, {0x21D2, 0x21D2}, {0x21D4, 0x21D4}, + {0x21E7, 0x21E7}, {0x2200, 0x2200}, {0x2202, 0x2203}, + {0x2207, 0x2208}, {0x220B, 0x220B}, {0x220F, 0x220F}, + {0x2211, 0x2211}, {0x2215, 0x2215}, {0x221A, 0x221A}, + {0x221D, 0x2220}, {0x2223, 0x2223}, {0x2225, 0x2225}, + {0x2227, 0x222C}, {0x222E, 0x222E}, {0x2234, 0x2237}, + {0x223C, 0x223D}, {0x2248, 0x2248}, {0x224C, 0x224C}, + {0x2252, 0x2252}, {0x2260, 0x2261}, {0x2264, 0x2267}, + {0x226A, 0x226B}, {0x226E, 0x226F}, {0x2282, 0x2283}, + {0x2286, 0x2287}, {0x2295, 0x2295}, {0x2299, 0x2299}, + {0x22A5, 0x22A5}, {0x22BF, 0x22BF}, {0x2312, 0x2312}, + {0x2460, 0x24E9}, {0x24EB, 0x254B}, {0x2550, 0x2573}, + {0x2580, 0x258F}, {0x2592, 0x2595}, {0x25A0, 0x25A1}, + {0x25A3, 0x25A9}, {0x25B2, 0x25B3}, {0x25B6, 0x25B7}, + {0x25BC, 0x25BD}, {0x25C0, 0x25C1}, {0x25C6, 0x25C8}, + {0x25CB, 0x25CB}, {0x25CE, 0x25D1}, {0x25E2, 0x25E5}, + {0x25EF, 0x25EF}, {0x2605, 0x2606}, {0x2609, 0x2609}, + {0x260E, 0x260F}, {0x261C, 0x261C}, {0x261E, 0x261E}, + {0x2640, 0x2640}, {0x2642, 0x2642}, {0x2660, 0x2661}, + {0x2663, 0x2665}, {0x2667, 0x266A}, {0x266C, 0x266D}, + {0x266F, 0x266F}, {0x269E, 0x269F}, {0x26BF, 0x26BF}, + {0x26C6, 0x26CD}, {0x26CF, 0x26D3}, {0x26D5, 0x26E1}, + {0x26E3, 0x26E3}, {0x26E8, 0x26E9}, {0x26EB, 0x26F1}, + {0x26F4, 0x26F4}, {0x26F6, 0x26F9}, {0x26FB, 0x26FC}, + {0x26FE, 0x26FF}, {0x273D, 0x273D}, {0x2776, 0x277F}, + {0x2B56, 0x2B59}, {0x3248, 0x324F}, {0xE000, 0xF8FF}, + {0xFE00, 0xFE0F}, {0xFFFD, 0xFFFD}, {0x1F100, 0x1F10A}, + {0x1F110, 0x1F12D}, {0x1F130, 0x1F169}, {0x1F170, 0x1F18D}, + {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF}, + {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD}, +} +var notassigned = table{ + {0x27E6, 0x27ED}, {0x2985, 0x2986}, +} + +var neutral = table{ + {0x0000, 
0x001F}, {0x007F, 0x00A0}, {0x00A9, 0x00A9}, + {0x00AB, 0x00AB}, {0x00B5, 0x00B5}, {0x00BB, 0x00BB}, + {0x00C0, 0x00C5}, {0x00C7, 0x00CF}, {0x00D1, 0x00D6}, + {0x00D9, 0x00DD}, {0x00E2, 0x00E5}, {0x00E7, 0x00E7}, + {0x00EB, 0x00EB}, {0x00EE, 0x00EF}, {0x00F1, 0x00F1}, + {0x00F4, 0x00F6}, {0x00FB, 0x00FB}, {0x00FD, 0x00FD}, + {0x00FF, 0x0100}, {0x0102, 0x0110}, {0x0112, 0x0112}, + {0x0114, 0x011A}, {0x011C, 0x0125}, {0x0128, 0x012A}, + {0x012C, 0x0130}, {0x0134, 0x0137}, {0x0139, 0x013E}, + {0x0143, 0x0143}, {0x0145, 0x0147}, {0x014C, 0x014C}, + {0x014E, 0x0151}, {0x0154, 0x0165}, {0x0168, 0x016A}, + {0x016C, 0x01CD}, {0x01CF, 0x01CF}, {0x01D1, 0x01D1}, + {0x01D3, 0x01D3}, {0x01D5, 0x01D5}, {0x01D7, 0x01D7}, + {0x01D9, 0x01D9}, {0x01DB, 0x01DB}, {0x01DD, 0x0250}, + {0x0252, 0x0260}, {0x0262, 0x02C3}, {0x02C5, 0x02C6}, + {0x02C8, 0x02C8}, {0x02CC, 0x02CC}, {0x02CE, 0x02CF}, + {0x02D1, 0x02D7}, {0x02DC, 0x02DC}, {0x02DE, 0x02DE}, + {0x02E0, 0x02FF}, {0x0370, 0x0377}, {0x037A, 0x037F}, + {0x0384, 0x038A}, {0x038C, 0x038C}, {0x038E, 0x0390}, + {0x03AA, 0x03B0}, {0x03C2, 0x03C2}, {0x03CA, 0x0400}, + {0x0402, 0x040F}, {0x0450, 0x0450}, {0x0452, 0x052F}, + {0x0531, 0x0556}, {0x0559, 0x058A}, {0x058D, 0x058F}, + {0x0591, 0x05C7}, {0x05D0, 0x05EA}, {0x05EF, 0x05F4}, + {0x0600, 0x061C}, {0x061E, 0x070D}, {0x070F, 0x074A}, + {0x074D, 0x07B1}, {0x07C0, 0x07FA}, {0x07FD, 0x082D}, + {0x0830, 0x083E}, {0x0840, 0x085B}, {0x085E, 0x085E}, + {0x0860, 0x086A}, {0x08A0, 0x08B4}, {0x08B6, 0x08C7}, + {0x08D3, 0x0983}, {0x0985, 0x098C}, {0x098F, 0x0990}, + {0x0993, 0x09A8}, {0x09AA, 0x09B0}, {0x09B2, 0x09B2}, + {0x09B6, 0x09B9}, {0x09BC, 0x09C4}, {0x09C7, 0x09C8}, + {0x09CB, 0x09CE}, {0x09D7, 0x09D7}, {0x09DC, 0x09DD}, + {0x09DF, 0x09E3}, {0x09E6, 0x09FE}, {0x0A01, 0x0A03}, + {0x0A05, 0x0A0A}, {0x0A0F, 0x0A10}, {0x0A13, 0x0A28}, + {0x0A2A, 0x0A30}, {0x0A32, 0x0A33}, {0x0A35, 0x0A36}, + {0x0A38, 0x0A39}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, + {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, + {0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E}, {0x0A66, 0x0A76}, + {0x0A81, 0x0A83}, {0x0A85, 0x0A8D}, {0x0A8F, 0x0A91}, + {0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, {0x0AB2, 0x0AB3}, + {0x0AB5, 0x0AB9}, {0x0ABC, 0x0AC5}, {0x0AC7, 0x0AC9}, + {0x0ACB, 0x0ACD}, {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE3}, + {0x0AE6, 0x0AF1}, {0x0AF9, 0x0AFF}, {0x0B01, 0x0B03}, + {0x0B05, 0x0B0C}, {0x0B0F, 0x0B10}, {0x0B13, 0x0B28}, + {0x0B2A, 0x0B30}, {0x0B32, 0x0B33}, {0x0B35, 0x0B39}, + {0x0B3C, 0x0B44}, {0x0B47, 0x0B48}, {0x0B4B, 0x0B4D}, + {0x0B55, 0x0B57}, {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B63}, + {0x0B66, 0x0B77}, {0x0B82, 0x0B83}, {0x0B85, 0x0B8A}, + {0x0B8E, 0x0B90}, {0x0B92, 0x0B95}, {0x0B99, 0x0B9A}, + {0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F}, {0x0BA3, 0x0BA4}, + {0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9}, {0x0BBE, 0x0BC2}, + {0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCD}, {0x0BD0, 0x0BD0}, + {0x0BD7, 0x0BD7}, {0x0BE6, 0x0BFA}, {0x0C00, 0x0C0C}, + {0x0C0E, 0x0C10}, {0x0C12, 0x0C28}, {0x0C2A, 0x0C39}, + {0x0C3D, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, + {0x0C55, 0x0C56}, {0x0C58, 0x0C5A}, {0x0C60, 0x0C63}, + {0x0C66, 0x0C6F}, {0x0C77, 0x0C8C}, {0x0C8E, 0x0C90}, + {0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9}, + {0x0CBC, 0x0CC4}, {0x0CC6, 0x0CC8}, {0x0CCA, 0x0CCD}, + {0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE3}, + {0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF2}, {0x0D00, 0x0D0C}, + {0x0D0E, 0x0D10}, {0x0D12, 0x0D44}, {0x0D46, 0x0D48}, + {0x0D4A, 0x0D4F}, {0x0D54, 0x0D63}, {0x0D66, 0x0D7F}, + {0x0D81, 0x0D83}, {0x0D85, 0x0D96}, {0x0D9A, 0x0DB1}, + {0x0DB3, 0x0DBB}, {0x0DBD, 0x0DBD}, 
{0x0DC0, 0x0DC6}, + {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD4}, {0x0DD6, 0x0DD6}, + {0x0DD8, 0x0DDF}, {0x0DE6, 0x0DEF}, {0x0DF2, 0x0DF4}, + {0x0E01, 0x0E3A}, {0x0E3F, 0x0E5B}, {0x0E81, 0x0E82}, + {0x0E84, 0x0E84}, {0x0E86, 0x0E8A}, {0x0E8C, 0x0EA3}, + {0x0EA5, 0x0EA5}, {0x0EA7, 0x0EBD}, {0x0EC0, 0x0EC4}, + {0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9}, + {0x0EDC, 0x0EDF}, {0x0F00, 0x0F47}, {0x0F49, 0x0F6C}, + {0x0F71, 0x0F97}, {0x0F99, 0x0FBC}, {0x0FBE, 0x0FCC}, + {0x0FCE, 0x0FDA}, {0x1000, 0x10C5}, {0x10C7, 0x10C7}, + {0x10CD, 0x10CD}, {0x10D0, 0x10FF}, {0x1160, 0x1248}, + {0x124A, 0x124D}, {0x1250, 0x1256}, {0x1258, 0x1258}, + {0x125A, 0x125D}, {0x1260, 0x1288}, {0x128A, 0x128D}, + {0x1290, 0x12B0}, {0x12B2, 0x12B5}, {0x12B8, 0x12BE}, + {0x12C0, 0x12C0}, {0x12C2, 0x12C5}, {0x12C8, 0x12D6}, + {0x12D8, 0x1310}, {0x1312, 0x1315}, {0x1318, 0x135A}, + {0x135D, 0x137C}, {0x1380, 0x1399}, {0x13A0, 0x13F5}, + {0x13F8, 0x13FD}, {0x1400, 0x169C}, {0x16A0, 0x16F8}, + {0x1700, 0x170C}, {0x170E, 0x1714}, {0x1720, 0x1736}, + {0x1740, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770}, + {0x1772, 0x1773}, {0x1780, 0x17DD}, {0x17E0, 0x17E9}, + {0x17F0, 0x17F9}, {0x1800, 0x180E}, {0x1810, 0x1819}, + {0x1820, 0x1878}, {0x1880, 0x18AA}, {0x18B0, 0x18F5}, + {0x1900, 0x191E}, {0x1920, 0x192B}, {0x1930, 0x193B}, + {0x1940, 0x1940}, {0x1944, 0x196D}, {0x1970, 0x1974}, + {0x1980, 0x19AB}, {0x19B0, 0x19C9}, {0x19D0, 0x19DA}, + {0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E}, {0x1A60, 0x1A7C}, + {0x1A7F, 0x1A89}, {0x1A90, 0x1A99}, {0x1AA0, 0x1AAD}, + {0x1AB0, 0x1AC0}, {0x1B00, 0x1B4B}, {0x1B50, 0x1B7C}, + {0x1B80, 0x1BF3}, {0x1BFC, 0x1C37}, {0x1C3B, 0x1C49}, + {0x1C4D, 0x1C88}, {0x1C90, 0x1CBA}, {0x1CBD, 0x1CC7}, + {0x1CD0, 0x1CFA}, {0x1D00, 0x1DF9}, {0x1DFB, 0x1F15}, + {0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D}, + {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B}, + {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4}, + {0x1FB6, 0x1FC4}, {0x1FC6, 0x1FD3}, {0x1FD6, 0x1FDB}, + {0x1FDD, 0x1FEF}, {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFE}, + {0x2000, 0x200F}, {0x2011, 0x2012}, {0x2017, 0x2017}, + {0x201A, 0x201B}, {0x201E, 0x201F}, {0x2023, 0x2023}, + {0x2028, 0x202F}, {0x2031, 0x2031}, {0x2034, 0x2034}, + {0x2036, 0x203A}, {0x203C, 0x203D}, {0x203F, 0x2064}, + {0x2066, 0x2071}, {0x2075, 0x207E}, {0x2080, 0x2080}, + {0x2085, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8}, + {0x20AA, 0x20AB}, {0x20AD, 0x20BF}, {0x20D0, 0x20F0}, + {0x2100, 0x2102}, {0x2104, 0x2104}, {0x2106, 0x2108}, + {0x210A, 0x2112}, {0x2114, 0x2115}, {0x2117, 0x2120}, + {0x2123, 0x2125}, {0x2127, 0x212A}, {0x212C, 0x2152}, + {0x2155, 0x215A}, {0x215F, 0x215F}, {0x216C, 0x216F}, + {0x217A, 0x2188}, {0x218A, 0x218B}, {0x219A, 0x21B7}, + {0x21BA, 0x21D1}, {0x21D3, 0x21D3}, {0x21D5, 0x21E6}, + {0x21E8, 0x21FF}, {0x2201, 0x2201}, {0x2204, 0x2206}, + {0x2209, 0x220A}, {0x220C, 0x220E}, {0x2210, 0x2210}, + {0x2212, 0x2214}, {0x2216, 0x2219}, {0x221B, 0x221C}, + {0x2221, 0x2222}, {0x2224, 0x2224}, {0x2226, 0x2226}, + {0x222D, 0x222D}, {0x222F, 0x2233}, {0x2238, 0x223B}, + {0x223E, 0x2247}, {0x2249, 0x224B}, {0x224D, 0x2251}, + {0x2253, 0x225F}, {0x2262, 0x2263}, {0x2268, 0x2269}, + {0x226C, 0x226D}, {0x2270, 0x2281}, {0x2284, 0x2285}, + {0x2288, 0x2294}, {0x2296, 0x2298}, {0x229A, 0x22A4}, + {0x22A6, 0x22BE}, {0x22C0, 0x2311}, {0x2313, 0x2319}, + {0x231C, 0x2328}, {0x232B, 0x23E8}, {0x23ED, 0x23EF}, + {0x23F1, 0x23F2}, {0x23F4, 0x2426}, {0x2440, 0x244A}, + {0x24EA, 0x24EA}, {0x254C, 0x254F}, {0x2574, 0x257F}, + {0x2590, 0x2591}, {0x2596, 0x259F}, {0x25A2, 0x25A2}, + 
{0x25AA, 0x25B1}, {0x25B4, 0x25B5}, {0x25B8, 0x25BB}, + {0x25BE, 0x25BF}, {0x25C2, 0x25C5}, {0x25C9, 0x25CA}, + {0x25CC, 0x25CD}, {0x25D2, 0x25E1}, {0x25E6, 0x25EE}, + {0x25F0, 0x25FC}, {0x25FF, 0x2604}, {0x2607, 0x2608}, + {0x260A, 0x260D}, {0x2610, 0x2613}, {0x2616, 0x261B}, + {0x261D, 0x261D}, {0x261F, 0x263F}, {0x2641, 0x2641}, + {0x2643, 0x2647}, {0x2654, 0x265F}, {0x2662, 0x2662}, + {0x2666, 0x2666}, {0x266B, 0x266B}, {0x266E, 0x266E}, + {0x2670, 0x267E}, {0x2680, 0x2692}, {0x2694, 0x269D}, + {0x26A0, 0x26A0}, {0x26A2, 0x26A9}, {0x26AC, 0x26BC}, + {0x26C0, 0x26C3}, {0x26E2, 0x26E2}, {0x26E4, 0x26E7}, + {0x2700, 0x2704}, {0x2706, 0x2709}, {0x270C, 0x2727}, + {0x2729, 0x273C}, {0x273E, 0x274B}, {0x274D, 0x274D}, + {0x274F, 0x2752}, {0x2756, 0x2756}, {0x2758, 0x2775}, + {0x2780, 0x2794}, {0x2798, 0x27AF}, {0x27B1, 0x27BE}, + {0x27C0, 0x27E5}, {0x27EE, 0x2984}, {0x2987, 0x2B1A}, + {0x2B1D, 0x2B4F}, {0x2B51, 0x2B54}, {0x2B5A, 0x2B73}, + {0x2B76, 0x2B95}, {0x2B97, 0x2C2E}, {0x2C30, 0x2C5E}, + {0x2C60, 0x2CF3}, {0x2CF9, 0x2D25}, {0x2D27, 0x2D27}, + {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D70}, + {0x2D7F, 0x2D96}, {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE}, + {0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6}, + {0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE}, + {0x2DE0, 0x2E52}, {0x303F, 0x303F}, {0x4DC0, 0x4DFF}, + {0xA4D0, 0xA62B}, {0xA640, 0xA6F7}, {0xA700, 0xA7BF}, + {0xA7C2, 0xA7CA}, {0xA7F5, 0xA82C}, {0xA830, 0xA839}, + {0xA840, 0xA877}, {0xA880, 0xA8C5}, {0xA8CE, 0xA8D9}, + {0xA8E0, 0xA953}, {0xA95F, 0xA95F}, {0xA980, 0xA9CD}, + {0xA9CF, 0xA9D9}, {0xA9DE, 0xA9FE}, {0xAA00, 0xAA36}, + {0xAA40, 0xAA4D}, {0xAA50, 0xAA59}, {0xAA5C, 0xAAC2}, + {0xAADB, 0xAAF6}, {0xAB01, 0xAB06}, {0xAB09, 0xAB0E}, + {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 0xAB2E}, + {0xAB30, 0xAB6B}, {0xAB70, 0xABED}, {0xABF0, 0xABF9}, + {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDFFF}, + {0xFB00, 0xFB06}, {0xFB13, 0xFB17}, {0xFB1D, 0xFB36}, + {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41}, + {0xFB43, 0xFB44}, {0xFB46, 0xFBC1}, {0xFBD3, 0xFD3F}, + {0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, {0xFDF0, 0xFDFD}, + {0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC}, + {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFC}, {0x10000, 0x1000B}, + {0x1000D, 0x10026}, {0x10028, 0x1003A}, {0x1003C, 0x1003D}, + {0x1003F, 0x1004D}, {0x10050, 0x1005D}, {0x10080, 0x100FA}, + {0x10100, 0x10102}, {0x10107, 0x10133}, {0x10137, 0x1018E}, + {0x10190, 0x1019C}, {0x101A0, 0x101A0}, {0x101D0, 0x101FD}, + {0x10280, 0x1029C}, {0x102A0, 0x102D0}, {0x102E0, 0x102FB}, + {0x10300, 0x10323}, {0x1032D, 0x1034A}, {0x10350, 0x1037A}, + {0x10380, 0x1039D}, {0x1039F, 0x103C3}, {0x103C8, 0x103D5}, + {0x10400, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3}, + {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563}, + {0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755}, + {0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808}, + {0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C}, + {0x1083F, 0x10855}, {0x10857, 0x1089E}, {0x108A7, 0x108AF}, + {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x1091B}, + {0x1091F, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x109B7}, + {0x109BC, 0x109CF}, {0x109D2, 0x10A03}, {0x10A05, 0x10A06}, + {0x10A0C, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A35}, + {0x10A38, 0x10A3A}, {0x10A3F, 0x10A48}, {0x10A50, 0x10A58}, + {0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6}, {0x10AEB, 0x10AF6}, + {0x10B00, 0x10B35}, {0x10B39, 0x10B55}, {0x10B58, 0x10B72}, + {0x10B78, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF}, + {0x10C00, 
0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2}, + {0x10CFA, 0x10D27}, {0x10D30, 0x10D39}, {0x10E60, 0x10E7E}, + {0x10E80, 0x10EA9}, {0x10EAB, 0x10EAD}, {0x10EB0, 0x10EB1}, + {0x10F00, 0x10F27}, {0x10F30, 0x10F59}, {0x10FB0, 0x10FCB}, + {0x10FE0, 0x10FF6}, {0x11000, 0x1104D}, {0x11052, 0x1106F}, + {0x1107F, 0x110C1}, {0x110CD, 0x110CD}, {0x110D0, 0x110E8}, + {0x110F0, 0x110F9}, {0x11100, 0x11134}, {0x11136, 0x11147}, + {0x11150, 0x11176}, {0x11180, 0x111DF}, {0x111E1, 0x111F4}, + {0x11200, 0x11211}, {0x11213, 0x1123E}, {0x11280, 0x11286}, + {0x11288, 0x11288}, {0x1128A, 0x1128D}, {0x1128F, 0x1129D}, + {0x1129F, 0x112A9}, {0x112B0, 0x112EA}, {0x112F0, 0x112F9}, + {0x11300, 0x11303}, {0x11305, 0x1130C}, {0x1130F, 0x11310}, + {0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333}, + {0x11335, 0x11339}, {0x1133B, 0x11344}, {0x11347, 0x11348}, + {0x1134B, 0x1134D}, {0x11350, 0x11350}, {0x11357, 0x11357}, + {0x1135D, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374}, + {0x11400, 0x1145B}, {0x1145D, 0x11461}, {0x11480, 0x114C7}, + {0x114D0, 0x114D9}, {0x11580, 0x115B5}, {0x115B8, 0x115DD}, + {0x11600, 0x11644}, {0x11650, 0x11659}, {0x11660, 0x1166C}, + {0x11680, 0x116B8}, {0x116C0, 0x116C9}, {0x11700, 0x1171A}, + {0x1171D, 0x1172B}, {0x11730, 0x1173F}, {0x11800, 0x1183B}, + {0x118A0, 0x118F2}, {0x118FF, 0x11906}, {0x11909, 0x11909}, + {0x1190C, 0x11913}, {0x11915, 0x11916}, {0x11918, 0x11935}, + {0x11937, 0x11938}, {0x1193B, 0x11946}, {0x11950, 0x11959}, + {0x119A0, 0x119A7}, {0x119AA, 0x119D7}, {0x119DA, 0x119E4}, + {0x11A00, 0x11A47}, {0x11A50, 0x11AA2}, {0x11AC0, 0x11AF8}, + {0x11C00, 0x11C08}, {0x11C0A, 0x11C36}, {0x11C38, 0x11C45}, + {0x11C50, 0x11C6C}, {0x11C70, 0x11C8F}, {0x11C92, 0x11CA7}, + {0x11CA9, 0x11CB6}, {0x11D00, 0x11D06}, {0x11D08, 0x11D09}, + {0x11D0B, 0x11D36}, {0x11D3A, 0x11D3A}, {0x11D3C, 0x11D3D}, + {0x11D3F, 0x11D47}, {0x11D50, 0x11D59}, {0x11D60, 0x11D65}, + {0x11D67, 0x11D68}, {0x11D6A, 0x11D8E}, {0x11D90, 0x11D91}, + {0x11D93, 0x11D98}, {0x11DA0, 0x11DA9}, {0x11EE0, 0x11EF8}, + {0x11FB0, 0x11FB0}, {0x11FC0, 0x11FF1}, {0x11FFF, 0x12399}, + {0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543}, + {0x13000, 0x1342E}, {0x13430, 0x13438}, {0x14400, 0x14646}, + {0x16800, 0x16A38}, {0x16A40, 0x16A5E}, {0x16A60, 0x16A69}, + {0x16A6E, 0x16A6F}, {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF5}, + {0x16B00, 0x16B45}, {0x16B50, 0x16B59}, {0x16B5B, 0x16B61}, + {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, {0x16E40, 0x16E9A}, + {0x16F00, 0x16F4A}, {0x16F4F, 0x16F87}, {0x16F8F, 0x16F9F}, + {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, {0x1BC80, 0x1BC88}, + {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3}, {0x1D000, 0x1D0F5}, + {0x1D100, 0x1D126}, {0x1D129, 0x1D1E8}, {0x1D200, 0x1D245}, + {0x1D2E0, 0x1D2F3}, {0x1D300, 0x1D356}, {0x1D360, 0x1D378}, + {0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F}, + {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC}, + {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3}, + {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514}, + {0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E}, + {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, {0x1D54A, 0x1D550}, + {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B}, + {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006}, + {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, + {0x1E026, 0x1E02A}, {0x1E100, 0x1E12C}, {0x1E130, 0x1E13D}, + {0x1E140, 0x1E149}, {0x1E14E, 0x1E14F}, {0x1E2C0, 0x1E2F9}, + {0x1E2FF, 0x1E2FF}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6}, + {0x1E900, 0x1E94B}, {0x1E950, 
0x1E959}, {0x1E95E, 0x1E95F}, + {0x1EC71, 0x1ECB4}, {0x1ED01, 0x1ED3D}, {0x1EE00, 0x1EE03}, + {0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, {0x1EE24, 0x1EE24}, + {0x1EE27, 0x1EE27}, {0x1EE29, 0x1EE32}, {0x1EE34, 0x1EE37}, + {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B}, {0x1EE42, 0x1EE42}, + {0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49}, {0x1EE4B, 0x1EE4B}, + {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52}, {0x1EE54, 0x1EE54}, + {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59}, {0x1EE5B, 0x1EE5B}, + {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F}, {0x1EE61, 0x1EE62}, + {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A}, {0x1EE6C, 0x1EE72}, + {0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C}, {0x1EE7E, 0x1EE7E}, + {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B}, {0x1EEA1, 0x1EEA3}, + {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB}, {0x1EEF0, 0x1EEF1}, + {0x1F000, 0x1F003}, {0x1F005, 0x1F02B}, {0x1F030, 0x1F093}, + {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF}, {0x1F0C1, 0x1F0CE}, + {0x1F0D1, 0x1F0F5}, {0x1F10B, 0x1F10F}, {0x1F12E, 0x1F12F}, + {0x1F16A, 0x1F16F}, {0x1F1AD, 0x1F1AD}, {0x1F1E6, 0x1F1FF}, + {0x1F321, 0x1F32C}, {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D}, + {0x1F394, 0x1F39F}, {0x1F3CB, 0x1F3CE}, {0x1F3D4, 0x1F3DF}, + {0x1F3F1, 0x1F3F3}, {0x1F3F5, 0x1F3F7}, {0x1F43F, 0x1F43F}, + {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FE}, {0x1F53E, 0x1F54A}, + {0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594}, + {0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F}, + {0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6D3, 0x1F6D4}, + {0x1F6E0, 0x1F6EA}, {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773}, + {0x1F780, 0x1F7D8}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, + {0x1F850, 0x1F859}, {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD}, + {0x1F8B0, 0x1F8B1}, {0x1F900, 0x1F90B}, {0x1F93B, 0x1F93B}, + {0x1F946, 0x1F946}, {0x1FA00, 0x1FA53}, {0x1FA60, 0x1FA6D}, + {0x1FB00, 0x1FB92}, {0x1FB94, 0x1FBCA}, {0x1FBF0, 0x1FBF9}, + {0xE0001, 0xE0001}, {0xE0020, 0xE007F}, +} + +var emoji = table{ + {0x203C, 0x203C}, {0x2049, 0x2049}, {0x2122, 0x2122}, + {0x2139, 0x2139}, {0x2194, 0x2199}, {0x21A9, 0x21AA}, + {0x231A, 0x231B}, {0x2328, 0x2328}, {0x2388, 0x2388}, + {0x23CF, 0x23CF}, {0x23E9, 0x23F3}, {0x23F8, 0x23FA}, + {0x24C2, 0x24C2}, {0x25AA, 0x25AB}, {0x25B6, 0x25B6}, + {0x25C0, 0x25C0}, {0x25FB, 0x25FE}, {0x2600, 0x2605}, + {0x2607, 0x2612}, {0x2614, 0x2685}, {0x2690, 0x2705}, + {0x2708, 0x2712}, {0x2714, 0x2714}, {0x2716, 0x2716}, + {0x271D, 0x271D}, {0x2721, 0x2721}, {0x2728, 0x2728}, + {0x2733, 0x2734}, {0x2744, 0x2744}, {0x2747, 0x2747}, + {0x274C, 0x274C}, {0x274E, 0x274E}, {0x2753, 0x2755}, + {0x2757, 0x2757}, {0x2763, 0x2767}, {0x2795, 0x2797}, + {0x27A1, 0x27A1}, {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, + {0x2934, 0x2935}, {0x2B05, 0x2B07}, {0x2B1B, 0x2B1C}, + {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x3030, 0x3030}, + {0x303D, 0x303D}, {0x3297, 0x3297}, {0x3299, 0x3299}, + {0x1F000, 0x1F0FF}, {0x1F10D, 0x1F10F}, {0x1F12F, 0x1F12F}, + {0x1F16C, 0x1F171}, {0x1F17E, 0x1F17F}, {0x1F18E, 0x1F18E}, + {0x1F191, 0x1F19A}, {0x1F1AD, 0x1F1E5}, {0x1F201, 0x1F20F}, + {0x1F21A, 0x1F21A}, {0x1F22F, 0x1F22F}, {0x1F232, 0x1F23A}, + {0x1F23C, 0x1F23F}, {0x1F249, 0x1F3FA}, {0x1F400, 0x1F53D}, + {0x1F546, 0x1F64F}, {0x1F680, 0x1F6FF}, {0x1F774, 0x1F77F}, + {0x1F7D5, 0x1F7FF}, {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F}, + {0x1F85A, 0x1F85F}, {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F8FF}, + {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1FAFF}, + {0x1FC00, 0x1FFFD}, +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go new file mode 100644 index 000000000..d6a61777d 
--- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go @@ -0,0 +1,28 @@ +// +build windows +// +build !appengine + +package runewidth + +import ( + "syscall" +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32") + procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP") +) + +// IsEastAsian return true if the current locale is CJK +func IsEastAsian() bool { + r1, _, _ := procGetConsoleOutputCP.Call() + if r1 == 0 { + return false + } + + switch int(r1) { + case 932, 51932, 936, 949, 950: + return true + } + + return false +} diff --git a/vendor/github.com/rivo/uniseg/LICENSE.txt b/vendor/github.com/rivo/uniseg/LICENSE.txt new file mode 100644 index 000000000..5040f1ef8 --- /dev/null +++ b/vendor/github.com/rivo/uniseg/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Oliver Kuederle + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/rivo/uniseg/README.md b/vendor/github.com/rivo/uniseg/README.md new file mode 100644 index 000000000..f8da293e1 --- /dev/null +++ b/vendor/github.com/rivo/uniseg/README.md @@ -0,0 +1,62 @@ +# Unicode Text Segmentation for Go + +[![Godoc Reference](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/rivo/uniseg) +[![Go Report](https://img.shields.io/badge/go%20report-A%2B-brightgreen.svg)](https://goreportcard.com/report/github.com/rivo/uniseg) + +This Go package implements Unicode Text Segmentation according to [Unicode Standard Annex #29](http://unicode.org/reports/tr29/) (Unicode version 12.0.0). + +At this point, only the determination of grapheme cluster boundaries is implemented. + +## Background + +In Go, [strings are read-only slices of bytes](https://blog.golang.org/strings). They can be turned into Unicode code points using the `for` loop or by casting: `[]rune(str)`. However, multiple code points may be combined into one user-perceived character or what the Unicode specification calls "grapheme cluster". Here are some examples: + +|String|Bytes (UTF-8)|Code points (runes)|Grapheme clusters| +|-|-|-|-| +|Käse|6 bytes: `4b 61 cc 88 73 65`|5 code points: `4b 61 308 73 65`|4 clusters: `[4b],[61 308],[73],[65]`| +|🏳️‍🌈|14 bytes: `f0 9f 8f b3 ef b8 8f e2 80 8d f0 9f 8c 88`|4 code points: `1f3f3 fe0f 200d 1f308`|1 cluster: `[1f3f3 fe0f 200d 1f308]`| +|🇩🇪|8 bytes: `f0 9f 87 a9 f0 9f 87 aa`|2 code points: `1f1e9 1f1ea`|1 cluster: `[1f1e9 1f1ea]`| + +This package provides a tool to iterate over these grapheme clusters. 
This may be used to determine the number of user-perceived characters, to split strings in their intended places, or to extract individual characters which form a unit.
+
+## Installation
+
+```bash
+go get github.com/rivo/uniseg
+```
+
+## Basic Example
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/rivo/uniseg"
+)
+
+func main() {
+	gr := uniseg.NewGraphemes("👍🏼!")
+	for gr.Next() {
+		fmt.Printf("%x ", gr.Runes())
+	}
+	// Output: [1f44d 1f3fc] [21]
+}
+```
+
+## Documentation
+
+Refer to https://godoc.org/github.com/rivo/uniseg for the package's documentation.
+
+## Dependencies
+
+This package does not depend on any packages outside the standard library.
+
+## Your Feedback
+
+Please add your issues here on GitHub. Feel free to get in touch if you have any questions.
+
+## Version
+
+Version tags will be introduced once Golang modules are official. Consider this version 0.1.
diff --git a/vendor/github.com/rivo/uniseg/doc.go b/vendor/github.com/rivo/uniseg/doc.go
new file mode 100644
index 000000000..60c737d7b
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/doc.go
@@ -0,0 +1,8 @@
+/*
+Package uniseg implements Unicode Text Segmentation according to Unicode
+Standard Annex #29 (http://unicode.org/reports/tr29/).
+
+At this point, only the determination of grapheme cluster boundaries is
+implemented.
+*/
+package uniseg
diff --git a/vendor/github.com/rivo/uniseg/go.mod b/vendor/github.com/rivo/uniseg/go.mod
new file mode 100644
index 000000000..a54280b2d
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/go.mod
@@ -0,0 +1,3 @@
+module github.com/rivo/uniseg
+
+go 1.12
diff --git a/vendor/github.com/rivo/uniseg/grapheme.go b/vendor/github.com/rivo/uniseg/grapheme.go
new file mode 100644
index 000000000..207157f5e
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/grapheme.go
@@ -0,0 +1,268 @@
+package uniseg
+
+import "unicode/utf8"
+
+// The states of the grapheme cluster parser.
+const (
+	grAny = iota
+	grCR
+	grControlLF
+	grL
+	grLVV
+	grLVTT
+	grPrepend
+	grExtendedPictographic
+	grExtendedPictographicZWJ
+	grRIOdd
+	grRIEven
+)
+
+// The grapheme cluster parser's breaking instructions.
+const (
+	grNoBoundary = iota
+	grBoundary
+)
+
+// The grapheme cluster parser's state transitions. Maps (state, property) to
+// (new state, breaking instruction, rule number). The breaking instruction
+// always refers to the boundary between the last and next code point.
+//
+// This map is queried as follows:
+//
+//   1. Find specific state + specific property. Stop if found.
+//   2. Find specific state + any property.
+//   3. Find any state + specific property.
+//   4. If only (2) or (3) (but not both) was found, stop.
+//   5. If both (2) and (3) were found, use state and breaking instruction from
+//      the transition with the lower rule number, prefer (3) if rule numbers
+//      are equal. Stop.
+//   6. Assume grAny and grBoundary.
+var grTransitions = map[[2]int][3]int{
+	// GB5
+	{grAny, prCR}:      {grCR, grBoundary, 50},
+	{grAny, prLF}:      {grControlLF, grBoundary, 50},
+	{grAny, prControl}: {grControlLF, grBoundary, 50},
+
+	// GB4
+	{grCR, prAny}:        {grAny, grBoundary, 40},
+	{grControlLF, prAny}: {grAny, grBoundary, 40},
+
+	// GB3.
+	{grCR, prLF}: {grAny, grNoBoundary, 30},
+
+	// GB6.
+	{grAny, prL}: {grL, grBoundary, 9990},
+	{grL, prL}:   {grL, grNoBoundary, 60},
+	{grL, prV}:   {grLVV, grNoBoundary, 60},
+	{grL, prLV}:  {grLVV, grNoBoundary, 60},
+	{grL, prLVT}: {grLVTT, grNoBoundary, 60},
+
+	// GB7.
+ {grAny, prLV}: {grLVV, grBoundary, 9990}, + {grAny, prV}: {grLVV, grBoundary, 9990}, + {grLVV, prV}: {grLVV, grNoBoundary, 70}, + {grLVV, prT}: {grLVTT, grNoBoundary, 70}, + + // GB8. + {grAny, prLVT}: {grLVTT, grBoundary, 9990}, + {grAny, prT}: {grLVTT, grBoundary, 9990}, + {grLVTT, prT}: {grLVTT, grNoBoundary, 80}, + + // GB9. + {grAny, prExtend}: {grAny, grNoBoundary, 90}, + {grAny, prZWJ}: {grAny, grNoBoundary, 90}, + + // GB9a. + {grAny, prSpacingMark}: {grAny, grNoBoundary, 91}, + + // GB9b. + {grAny, prPreprend}: {grPrepend, grBoundary, 9990}, + {grPrepend, prAny}: {grAny, grNoBoundary, 92}, + + // GB11. + {grAny, prExtendedPictographic}: {grExtendedPictographic, grBoundary, 9990}, + {grExtendedPictographic, prExtend}: {grExtendedPictographic, grNoBoundary, 110}, + {grExtendedPictographic, prZWJ}: {grExtendedPictographicZWJ, grNoBoundary, 110}, + {grExtendedPictographicZWJ, prExtendedPictographic}: {grExtendedPictographic, grNoBoundary, 110}, + + // GB12 / GB13. + {grAny, prRegionalIndicator}: {grRIOdd, grBoundary, 9990}, + {grRIOdd, prRegionalIndicator}: {grRIEven, grNoBoundary, 120}, + {grRIEven, prRegionalIndicator}: {grRIOdd, grBoundary, 120}, +} + +// Graphemes implements an iterator over Unicode extended grapheme clusters, +// specified in the Unicode Standard Annex #29. Grapheme clusters correspond to +// "user-perceived characters". These characters often consist of multiple +// code points (e.g. the "woman kissing woman" emoji consists of 8 code points: +// woman + ZWJ + heavy black heart (2 code points) + ZWJ + kiss mark + ZWJ + +// woman) and the rules described in Annex #29 must be applied to group those +// code points into clusters perceived by the user as one character. +type Graphemes struct { + // The code points over which this class iterates. + codePoints []rune + + // The (byte-based) indices of the code points into the original string plus + // len(original string). Thus, len(indices) = len(codePoints) + 1. + indices []int + + // The current grapheme cluster to be returned. These are indices into + // codePoints/indices. If start == end, we either haven't started iterating + // yet (0) or the iteration has already completed (1). + start, end int + + // The index of the next code point to be parsed. + pos int + + // The current state of the code point parser. + state int +} + +// NewGraphemes returns a new grapheme cluster iterator. +func NewGraphemes(s string) *Graphemes { + l := utf8.RuneCountInString(s) + codePoints := make([]rune, l) + indices := make([]int, l+1) + i := 0 + for pos, r := range s { + codePoints[i] = r + indices[i] = pos + i++ + } + indices[l] = len(s) + g := &Graphemes{ + codePoints: codePoints, + indices: indices, + } + g.Next() // Parse ahead. + return g +} + +// Next advances the iterator by one grapheme cluster and returns false if no +// clusters are left. This function must be called before the first cluster is +// accessed. +func (g *Graphemes) Next() bool { + g.start = g.end + + // The state transition gives us a boundary instruction BEFORE the next code + // point so we always need to stay ahead by one code point. + + // Parse the next code point. + for g.pos <= len(g.codePoints) { + // GB2. + if g.pos == len(g.codePoints) { + g.end = g.pos + g.pos++ + break + } + + // Determine the property of the next character. + nextProperty := property(g.codePoints[g.pos]) + g.pos++ + + // Find the applicable transition. 
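+		// Per the grTransitions comment above: an exact (state, property)
+		// entry wins outright; otherwise the (state, prAny) and
+		// (grAny, property) fallbacks are consulted and, when both exist,
+		// the one with the lower rule number takes precedence.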
+ var boundary bool + transition, ok := grTransitions[[2]int{g.state, nextProperty}] + if ok { + // We have a specific transition. We'll use it. + g.state = transition[0] + boundary = transition[1] == grBoundary + } else { + // No specific transition found. Try the less specific ones. + transAnyProp, okAnyProp := grTransitions[[2]int{g.state, prAny}] + transAnyState, okAnyState := grTransitions[[2]int{grAny, nextProperty}] + if okAnyProp && okAnyState { + // Both apply. We'll use a mix (see comments for grTransitions). + g.state = transAnyState[0] + boundary = transAnyState[1] == grBoundary + if transAnyProp[2] < transAnyState[2] { + g.state = transAnyProp[0] + boundary = transAnyProp[1] == grBoundary + } + } else if okAnyProp { + // We only have a specific state. + g.state = transAnyProp[0] + boundary = transAnyProp[1] == grBoundary + // This branch will probably never be reached because okAnyState will + // always be true given the current transition map. But we keep it here + // for future modifications to the transition map where this may not be + // true anymore. + } else if okAnyState { + // We only have a specific property. + g.state = transAnyState[0] + boundary = transAnyState[1] == grBoundary + } else { + // No known transition. GB999: Any x Any. + g.state = grAny + boundary = true + } + } + + // If we found a cluster boundary, let's stop here. The current cluster will + // be the one that just ended. + if g.pos-1 == 0 /* GB1 */ || boundary { + g.end = g.pos - 1 + break + } + } + + return g.start != g.end +} + +// Runes returns a slice of runes (code points) which corresponds to the current +// grapheme cluster. If the iterator is already past the end or Next() has not +// yet been called, nil is returned. +func (g *Graphemes) Runes() []rune { + if g.start == g.end { + return nil + } + return g.codePoints[g.start:g.end] +} + +// Str returns a substring of the original string which corresponds to the +// current grapheme cluster. If the iterator is already past the end or Next() +// has not yet been called, an empty string is returned. +func (g *Graphemes) Str() string { + if g.start == g.end { + return "" + } + return string(g.codePoints[g.start:g.end]) +} + +// Bytes returns a byte slice which corresponds to the current grapheme cluster. +// If the iterator is already past the end or Next() has not yet been called, +// nil is returned. +func (g *Graphemes) Bytes() []byte { + if g.start == g.end { + return nil + } + return []byte(string(g.codePoints[g.start:g.end])) +} + +// Positions returns the interval of the current grapheme cluster as byte +// positions into the original string. The first returned value "from" indexes +// the first byte and the second returned value "to" indexes the first byte that +// is not included anymore, i.e. str[from:to] is the current grapheme cluster of +// the original string "str". If Next() has not yet been called, both values are +// 0. If the iterator is already past the end, both values are 1. +func (g *Graphemes) Positions() (int, int) { + return g.indices[g.start], g.indices[g.end] +} + +// Reset puts the iterator into its initial state such that the next call to +// Next() sets it to the first grapheme cluster again. +func (g *Graphemes) Reset() { + g.start, g.end, g.pos, g.state = 0, 0, 0, grAny + g.Next() // Parse ahead again. +} + +// GraphemeClusterCount returns the number of user-perceived characters +// (grapheme clusters) for the given string. To calculate this number, it +// iterates through the string using the Graphemes iterator. 
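+//
+// For example, using the counts from the README table above:
+//
+//	GraphemeClusterCount("Käse") // 4: "a" plus a combining diaeresis form one cluster
+//	GraphemeClusterCount("🇩🇪")   // 1: a flag is a pair of regional indicators in one cluster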
+
+// GraphemeClusterCount returns the number of user-perceived characters
+// (grapheme clusters) for the given string. To calculate this number, it
+// iterates through the string using the Graphemes iterator.
+func GraphemeClusterCount(s string) (n int) {
+	g := NewGraphemes(s)
+	for g.Next() {
+		n++
+	}
+	return
+}
diff --git a/vendor/github.com/rivo/uniseg/properties.go b/vendor/github.com/rivo/uniseg/properties.go
new file mode 100644
index 000000000..a75ab5883
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/properties.go
@@ -0,0 +1,1658 @@
+package uniseg
+
+// The unicode properties. Only the ones needed in the context of this package
+// are included.
+const (
+	prAny = iota
+	prPreprend
+	prCR
+	prLF
+	prControl
+	prExtend
+	prRegionalIndicator
+	prSpacingMark
+	prL
+	prV
+	prT
+	prLV
+	prLVT
+	prZWJ
+	prExtendedPictographic
+)
+
+// Maps code point ranges to their properties. In the context of this package,
+// any code point that is not contained may map to "prAny". The code point
+// ranges in this slice are numerically sorted.
+//
+// These ranges were taken from
+// http://www.unicode.org/Public/UCD/latest/ucd/auxiliary/GraphemeBreakProperty.txt
+// as well as
+// https://unicode.org/Public/emoji/latest/emoji-data.txt
+// ("Extended_Pictographic" only) on March 11, 2019. See
+// https://www.unicode.org/license.html for the Unicode license agreement.
+var codePoints = [][3]int{
+	{0x0000, 0x0009, prControl}, // Cc [10] <control-0000>..<control-0009>
+	{0x000A, 0x000A, prLF}, // Cc <control-000A>
+	{0x000B, 0x000C, prControl}, // Cc [2] <control-000B>..<control-000C>
+	{0x000D, 0x000D, prCR}, // Cc <control-000D>
+	{0x000E, 0x001F, prControl}, // Cc [18] <control-000E>..<control-001F>
+	{0x007F, 0x009F, prControl}, // Cc [33] <control-007F>..<control-009F>
+	{0x00A9, 0x00A9, prExtendedPictographic}, // 1.1 [1] (©️) copyright
+	{0x00AD, 0x00AD, prControl}, // Cf SOFT HYPHEN
+	{0x00AE, 0x00AE, prExtendedPictographic}, // 1.1 [1] (®️) registered
+	{0x0300, 0x036F, prExtend}, // Mn [112] COMBINING GRAVE ACCENT..COMBINING LATIN SMALL LETTER X
+	{0x0483, 0x0487, prExtend}, // Mn [5] COMBINING CYRILLIC TITLO..COMBINING CYRILLIC POKRYTIE
+	{0x0488, 0x0489, prExtend}, // Me [2] COMBINING CYRILLIC HUNDRED THOUSANDS SIGN..COMBINING CYRILLIC MILLIONS SIGN
+	{0x0591, 0x05BD, prExtend}, // Mn [45] HEBREW ACCENT ETNAHTA..HEBREW POINT METEG
+	{0x05BF, 0x05BF, prExtend}, // Mn HEBREW POINT RAFE
+	{0x05C1, 0x05C2, prExtend}, // Mn [2] HEBREW POINT SHIN DOT..HEBREW POINT SIN DOT
+	{0x05C4, 0x05C5, prExtend}, // Mn [2] HEBREW MARK UPPER DOT..HEBREW MARK LOWER DOT
+	{0x05C7, 0x05C7, prExtend}, // Mn HEBREW POINT QAMATS QATAN
+	{0x0600, 0x0605, prPreprend}, // Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER MARK ABOVE
+	{0x0610, 0x061A, prExtend}, // Mn [11] ARABIC SIGN SALLALLAHOU ALAYHE WASSALLAM..ARABIC SMALL KASRA
+	{0x061C, 0x061C, prControl}, // Cf ARABIC LETTER MARK
+	{0x064B, 0x065F, prExtend}, // Mn [21] ARABIC FATHATAN..ARABIC WAVY HAMZA BELOW
+	{0x0670, 0x0670, prExtend}, // Mn ARABIC LETTER SUPERSCRIPT ALEF
+	{0x06D6, 0x06DC, prExtend}, // Mn [7] ARABIC SMALL HIGH LIGATURE SAD WITH LAM WITH ALEF MAKSURA..ARABIC SMALL HIGH SEEN
+	{0x06DD, 0x06DD, prPreprend}, // Cf ARABIC END OF AYAH
+	{0x06DF, 0x06E4, prExtend}, // Mn [6] ARABIC SMALL HIGH ROUNDED ZERO..ARABIC SMALL HIGH MADDA
+	{0x06E7, 0x06E8, prExtend}, // Mn [2] ARABIC SMALL HIGH YEH..ARABIC SMALL HIGH NOON
+	{0x06EA, 0x06ED, prExtend}, // Mn [4] ARABIC EMPTY CENTRE LOW STOP..ARABIC SMALL LOW MEEM
+	{0x070F, 0x070F, prPreprend}, // Cf SYRIAC ABBREVIATION MARK
+	{0x0711, 0x0711, prExtend}, // Mn SYRIAC LETTER SUPERSCRIPT ALAPH
+	{0x0730, 0x074A, prExtend}, // Mn [27] SYRIAC PTHAHA ABOVE..SYRIAC BARREKH
+	{0x07A6, 0x07B0, prExtend}, // Mn [11] THAANA ABAFILI..THAANA SUKUN
+	{0x07EB, 0x07F3, prExtend}, // Mn [9] NKO COMBINING SHORT HIGH TONE..NKO COMBINING DOUBLE DOT ABOVE
+	{0x07FD,
0x07FD, prExtend}, // Mn NKO DANTAYALAN + {0x0816, 0x0819, prExtend}, // Mn [4] SAMARITAN MARK IN..SAMARITAN MARK DAGESH + {0x081B, 0x0823, prExtend}, // Mn [9] SAMARITAN MARK EPENTHETIC YUT..SAMARITAN VOWEL SIGN A + {0x0825, 0x0827, prExtend}, // Mn [3] SAMARITAN VOWEL SIGN SHORT A..SAMARITAN VOWEL SIGN U + {0x0829, 0x082D, prExtend}, // Mn [5] SAMARITAN VOWEL SIGN LONG I..SAMARITAN MARK NEQUDAA + {0x0859, 0x085B, prExtend}, // Mn [3] MANDAIC AFFRICATION MARK..MANDAIC GEMINATION MARK + {0x08D3, 0x08E1, prExtend}, // Mn [15] ARABIC SMALL LOW WAW..ARABIC SMALL HIGH SIGN SAFHA + {0x08E2, 0x08E2, prPreprend}, // Cf ARABIC DISPUTED END OF AYAH + {0x08E3, 0x0902, prExtend}, // Mn [32] ARABIC TURNED DAMMA BELOW..DEVANAGARI SIGN ANUSVARA + {0x0903, 0x0903, prSpacingMark}, // Mc DEVANAGARI SIGN VISARGA + {0x093A, 0x093A, prExtend}, // Mn DEVANAGARI VOWEL SIGN OE + {0x093B, 0x093B, prSpacingMark}, // Mc DEVANAGARI VOWEL SIGN OOE + {0x093C, 0x093C, prExtend}, // Mn DEVANAGARI SIGN NUKTA + {0x093E, 0x0940, prSpacingMark}, // Mc [3] DEVANAGARI VOWEL SIGN AA..DEVANAGARI VOWEL SIGN II + {0x0941, 0x0948, prExtend}, // Mn [8] DEVANAGARI VOWEL SIGN U..DEVANAGARI VOWEL SIGN AI + {0x0949, 0x094C, prSpacingMark}, // Mc [4] DEVANAGARI VOWEL SIGN CANDRA O..DEVANAGARI VOWEL SIGN AU + {0x094D, 0x094D, prExtend}, // Mn DEVANAGARI SIGN VIRAMA + {0x094E, 0x094F, prSpacingMark}, // Mc [2] DEVANAGARI VOWEL SIGN PRISHTHAMATRA E..DEVANAGARI VOWEL SIGN AW + {0x0951, 0x0957, prExtend}, // Mn [7] DEVANAGARI STRESS SIGN UDATTA..DEVANAGARI VOWEL SIGN UUE + {0x0962, 0x0963, prExtend}, // Mn [2] DEVANAGARI VOWEL SIGN VOCALIC L..DEVANAGARI VOWEL SIGN VOCALIC LL + {0x0981, 0x0981, prExtend}, // Mn BENGALI SIGN CANDRABINDU + {0x0982, 0x0983, prSpacingMark}, // Mc [2] BENGALI SIGN ANUSVARA..BENGALI SIGN VISARGA + {0x09BC, 0x09BC, prExtend}, // Mn BENGALI SIGN NUKTA + {0x09BE, 0x09BE, prExtend}, // Mc BENGALI VOWEL SIGN AA + {0x09BF, 0x09C0, prSpacingMark}, // Mc [2] BENGALI VOWEL SIGN I..BENGALI VOWEL SIGN II + {0x09C1, 0x09C4, prExtend}, // Mn [4] BENGALI VOWEL SIGN U..BENGALI VOWEL SIGN VOCALIC RR + {0x09C7, 0x09C8, prSpacingMark}, // Mc [2] BENGALI VOWEL SIGN E..BENGALI VOWEL SIGN AI + {0x09CB, 0x09CC, prSpacingMark}, // Mc [2] BENGALI VOWEL SIGN O..BENGALI VOWEL SIGN AU + {0x09CD, 0x09CD, prExtend}, // Mn BENGALI SIGN VIRAMA + {0x09D7, 0x09D7, prExtend}, // Mc BENGALI AU LENGTH MARK + {0x09E2, 0x09E3, prExtend}, // Mn [2] BENGALI VOWEL SIGN VOCALIC L..BENGALI VOWEL SIGN VOCALIC LL + {0x09FE, 0x09FE, prExtend}, // Mn BENGALI SANDHI MARK + {0x0A01, 0x0A02, prExtend}, // Mn [2] GURMUKHI SIGN ADAK BINDI..GURMUKHI SIGN BINDI + {0x0A03, 0x0A03, prSpacingMark}, // Mc GURMUKHI SIGN VISARGA + {0x0A3C, 0x0A3C, prExtend}, // Mn GURMUKHI SIGN NUKTA + {0x0A3E, 0x0A40, prSpacingMark}, // Mc [3] GURMUKHI VOWEL SIGN AA..GURMUKHI VOWEL SIGN II + {0x0A41, 0x0A42, prExtend}, // Mn [2] GURMUKHI VOWEL SIGN U..GURMUKHI VOWEL SIGN UU + {0x0A47, 0x0A48, prExtend}, // Mn [2] GURMUKHI VOWEL SIGN EE..GURMUKHI VOWEL SIGN AI + {0x0A4B, 0x0A4D, prExtend}, // Mn [3] GURMUKHI VOWEL SIGN OO..GURMUKHI SIGN VIRAMA + {0x0A51, 0x0A51, prExtend}, // Mn GURMUKHI SIGN UDAAT + {0x0A70, 0x0A71, prExtend}, // Mn [2] GURMUKHI TIPPI..GURMUKHI ADDAK + {0x0A75, 0x0A75, prExtend}, // Mn GURMUKHI SIGN YAKASH + {0x0A81, 0x0A82, prExtend}, // Mn [2] GUJARATI SIGN CANDRABINDU..GUJARATI SIGN ANUSVARA + {0x0A83, 0x0A83, prSpacingMark}, // Mc GUJARATI SIGN VISARGA + {0x0ABC, 0x0ABC, prExtend}, // Mn GUJARATI SIGN NUKTA + {0x0ABE, 0x0AC0, prSpacingMark}, // Mc [3] GUJARATI VOWEL 
SIGN AA..GUJARATI VOWEL SIGN II + {0x0AC1, 0x0AC5, prExtend}, // Mn [5] GUJARATI VOWEL SIGN U..GUJARATI VOWEL SIGN CANDRA E + {0x0AC7, 0x0AC8, prExtend}, // Mn [2] GUJARATI VOWEL SIGN E..GUJARATI VOWEL SIGN AI + {0x0AC9, 0x0AC9, prSpacingMark}, // Mc GUJARATI VOWEL SIGN CANDRA O + {0x0ACB, 0x0ACC, prSpacingMark}, // Mc [2] GUJARATI VOWEL SIGN O..GUJARATI VOWEL SIGN AU + {0x0ACD, 0x0ACD, prExtend}, // Mn GUJARATI SIGN VIRAMA + {0x0AE2, 0x0AE3, prExtend}, // Mn [2] GUJARATI VOWEL SIGN VOCALIC L..GUJARATI VOWEL SIGN VOCALIC LL + {0x0AFA, 0x0AFF, prExtend}, // Mn [6] GUJARATI SIGN SUKUN..GUJARATI SIGN TWO-CIRCLE NUKTA ABOVE + {0x0B01, 0x0B01, prExtend}, // Mn ORIYA SIGN CANDRABINDU + {0x0B02, 0x0B03, prSpacingMark}, // Mc [2] ORIYA SIGN ANUSVARA..ORIYA SIGN VISARGA + {0x0B3C, 0x0B3C, prExtend}, // Mn ORIYA SIGN NUKTA + {0x0B3E, 0x0B3E, prExtend}, // Mc ORIYA VOWEL SIGN AA + {0x0B3F, 0x0B3F, prExtend}, // Mn ORIYA VOWEL SIGN I + {0x0B40, 0x0B40, prSpacingMark}, // Mc ORIYA VOWEL SIGN II + {0x0B41, 0x0B44, prExtend}, // Mn [4] ORIYA VOWEL SIGN U..ORIYA VOWEL SIGN VOCALIC RR + {0x0B47, 0x0B48, prSpacingMark}, // Mc [2] ORIYA VOWEL SIGN E..ORIYA VOWEL SIGN AI + {0x0B4B, 0x0B4C, prSpacingMark}, // Mc [2] ORIYA VOWEL SIGN O..ORIYA VOWEL SIGN AU + {0x0B4D, 0x0B4D, prExtend}, // Mn ORIYA SIGN VIRAMA + {0x0B56, 0x0B56, prExtend}, // Mn ORIYA AI LENGTH MARK + {0x0B57, 0x0B57, prExtend}, // Mc ORIYA AU LENGTH MARK + {0x0B62, 0x0B63, prExtend}, // Mn [2] ORIYA VOWEL SIGN VOCALIC L..ORIYA VOWEL SIGN VOCALIC LL + {0x0B82, 0x0B82, prExtend}, // Mn TAMIL SIGN ANUSVARA + {0x0BBE, 0x0BBE, prExtend}, // Mc TAMIL VOWEL SIGN AA + {0x0BBF, 0x0BBF, prSpacingMark}, // Mc TAMIL VOWEL SIGN I + {0x0BC0, 0x0BC0, prExtend}, // Mn TAMIL VOWEL SIGN II + {0x0BC1, 0x0BC2, prSpacingMark}, // Mc [2] TAMIL VOWEL SIGN U..TAMIL VOWEL SIGN UU + {0x0BC6, 0x0BC8, prSpacingMark}, // Mc [3] TAMIL VOWEL SIGN E..TAMIL VOWEL SIGN AI + {0x0BCA, 0x0BCC, prSpacingMark}, // Mc [3] TAMIL VOWEL SIGN O..TAMIL VOWEL SIGN AU + {0x0BCD, 0x0BCD, prExtend}, // Mn TAMIL SIGN VIRAMA + {0x0BD7, 0x0BD7, prExtend}, // Mc TAMIL AU LENGTH MARK + {0x0C00, 0x0C00, prExtend}, // Mn TELUGU SIGN COMBINING CANDRABINDU ABOVE + {0x0C01, 0x0C03, prSpacingMark}, // Mc [3] TELUGU SIGN CANDRABINDU..TELUGU SIGN VISARGA + {0x0C04, 0x0C04, prExtend}, // Mn TELUGU SIGN COMBINING ANUSVARA ABOVE + {0x0C3E, 0x0C40, prExtend}, // Mn [3] TELUGU VOWEL SIGN AA..TELUGU VOWEL SIGN II + {0x0C41, 0x0C44, prSpacingMark}, // Mc [4] TELUGU VOWEL SIGN U..TELUGU VOWEL SIGN VOCALIC RR + {0x0C46, 0x0C48, prExtend}, // Mn [3] TELUGU VOWEL SIGN E..TELUGU VOWEL SIGN AI + {0x0C4A, 0x0C4D, prExtend}, // Mn [4] TELUGU VOWEL SIGN O..TELUGU SIGN VIRAMA + {0x0C55, 0x0C56, prExtend}, // Mn [2] TELUGU LENGTH MARK..TELUGU AI LENGTH MARK + {0x0C62, 0x0C63, prExtend}, // Mn [2] TELUGU VOWEL SIGN VOCALIC L..TELUGU VOWEL SIGN VOCALIC LL + {0x0C81, 0x0C81, prExtend}, // Mn KANNADA SIGN CANDRABINDU + {0x0C82, 0x0C83, prSpacingMark}, // Mc [2] KANNADA SIGN ANUSVARA..KANNADA SIGN VISARGA + {0x0CBC, 0x0CBC, prExtend}, // Mn KANNADA SIGN NUKTA + {0x0CBE, 0x0CBE, prSpacingMark}, // Mc KANNADA VOWEL SIGN AA + {0x0CBF, 0x0CBF, prExtend}, // Mn KANNADA VOWEL SIGN I + {0x0CC0, 0x0CC1, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN II..KANNADA VOWEL SIGN U + {0x0CC2, 0x0CC2, prExtend}, // Mc KANNADA VOWEL SIGN UU + {0x0CC3, 0x0CC4, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN VOCALIC R..KANNADA VOWEL SIGN VOCALIC RR + {0x0CC6, 0x0CC6, prExtend}, // Mn KANNADA VOWEL SIGN E + {0x0CC7, 0x0CC8, prSpacingMark}, // Mc [2] 
KANNADA VOWEL SIGN EE..KANNADA VOWEL SIGN AI + {0x0CCA, 0x0CCB, prSpacingMark}, // Mc [2] KANNADA VOWEL SIGN O..KANNADA VOWEL SIGN OO + {0x0CCC, 0x0CCD, prExtend}, // Mn [2] KANNADA VOWEL SIGN AU..KANNADA SIGN VIRAMA + {0x0CD5, 0x0CD6, prExtend}, // Mc [2] KANNADA LENGTH MARK..KANNADA AI LENGTH MARK + {0x0CE2, 0x0CE3, prExtend}, // Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL + {0x0D00, 0x0D01, prExtend}, // Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU + {0x0D02, 0x0D03, prSpacingMark}, // Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA + {0x0D3B, 0x0D3C, prExtend}, // Mn [2] MALAYALAM SIGN VERTICAL BAR VIRAMA..MALAYALAM SIGN CIRCULAR VIRAMA + {0x0D3E, 0x0D3E, prExtend}, // Mc MALAYALAM VOWEL SIGN AA + {0x0D3F, 0x0D40, prSpacingMark}, // Mc [2] MALAYALAM VOWEL SIGN I..MALAYALAM VOWEL SIGN II + {0x0D41, 0x0D44, prExtend}, // Mn [4] MALAYALAM VOWEL SIGN U..MALAYALAM VOWEL SIGN VOCALIC RR + {0x0D46, 0x0D48, prSpacingMark}, // Mc [3] MALAYALAM VOWEL SIGN E..MALAYALAM VOWEL SIGN AI + {0x0D4A, 0x0D4C, prSpacingMark}, // Mc [3] MALAYALAM VOWEL SIGN O..MALAYALAM VOWEL SIGN AU + {0x0D4D, 0x0D4D, prExtend}, // Mn MALAYALAM SIGN VIRAMA + {0x0D4E, 0x0D4E, prPreprend}, // Lo MALAYALAM LETTER DOT REPH + {0x0D57, 0x0D57, prExtend}, // Mc MALAYALAM AU LENGTH MARK + {0x0D62, 0x0D63, prExtend}, // Mn [2] MALAYALAM VOWEL SIGN VOCALIC L..MALAYALAM VOWEL SIGN VOCALIC LL + {0x0D82, 0x0D83, prSpacingMark}, // Mc [2] SINHALA SIGN ANUSVARAYA..SINHALA SIGN VISARGAYA + {0x0DCA, 0x0DCA, prExtend}, // Mn SINHALA SIGN AL-LAKUNA + {0x0DCF, 0x0DCF, prExtend}, // Mc SINHALA VOWEL SIGN AELA-PILLA + {0x0DD0, 0x0DD1, prSpacingMark}, // Mc [2] SINHALA VOWEL SIGN KETTI AEDA-PILLA..SINHALA VOWEL SIGN DIGA AEDA-PILLA + {0x0DD2, 0x0DD4, prExtend}, // Mn [3] SINHALA VOWEL SIGN KETTI IS-PILLA..SINHALA VOWEL SIGN KETTI PAA-PILLA + {0x0DD6, 0x0DD6, prExtend}, // Mn SINHALA VOWEL SIGN DIGA PAA-PILLA + {0x0DD8, 0x0DDE, prSpacingMark}, // Mc [7] SINHALA VOWEL SIGN GAETTA-PILLA..SINHALA VOWEL SIGN KOMBUVA HAA GAYANUKITTA + {0x0DDF, 0x0DDF, prExtend}, // Mc SINHALA VOWEL SIGN GAYANUKITTA + {0x0DF2, 0x0DF3, prSpacingMark}, // Mc [2] SINHALA VOWEL SIGN DIGA GAETTA-PILLA..SINHALA VOWEL SIGN DIGA GAYANUKITTA + {0x0E31, 0x0E31, prExtend}, // Mn THAI CHARACTER MAI HAN-AKAT + {0x0E33, 0x0E33, prSpacingMark}, // Lo THAI CHARACTER SARA AM + {0x0E34, 0x0E3A, prExtend}, // Mn [7] THAI CHARACTER SARA I..THAI CHARACTER PHINTHU + {0x0E47, 0x0E4E, prExtend}, // Mn [8] THAI CHARACTER MAITAIKHU..THAI CHARACTER YAMAKKAN + {0x0EB1, 0x0EB1, prExtend}, // Mn LAO VOWEL SIGN MAI KAN + {0x0EB3, 0x0EB3, prSpacingMark}, // Lo LAO VOWEL SIGN AM + {0x0EB4, 0x0EBC, prExtend}, // Mn [9] LAO VOWEL SIGN I..LAO SEMIVOWEL SIGN LO + {0x0EC8, 0x0ECD, prExtend}, // Mn [6] LAO TONE MAI EK..LAO NIGGAHITA + {0x0F18, 0x0F19, prExtend}, // Mn [2] TIBETAN ASTROLOGICAL SIGN -KHYUD PA..TIBETAN ASTROLOGICAL SIGN SDONG TSHUGS + {0x0F35, 0x0F35, prExtend}, // Mn TIBETAN MARK NGAS BZUNG NYI ZLA + {0x0F37, 0x0F37, prExtend}, // Mn TIBETAN MARK NGAS BZUNG SGOR RTAGS + {0x0F39, 0x0F39, prExtend}, // Mn TIBETAN MARK TSA -PHRU + {0x0F3E, 0x0F3F, prSpacingMark}, // Mc [2] TIBETAN SIGN YAR TSHES..TIBETAN SIGN MAR TSHES + {0x0F71, 0x0F7E, prExtend}, // Mn [14] TIBETAN VOWEL SIGN AA..TIBETAN SIGN RJES SU NGA RO + {0x0F7F, 0x0F7F, prSpacingMark}, // Mc TIBETAN SIGN RNAM BCAD + {0x0F80, 0x0F84, prExtend}, // Mn [5] TIBETAN VOWEL SIGN REVERSED I..TIBETAN MARK HALANTA + {0x0F86, 0x0F87, prExtend}, // Mn [2] TIBETAN SIGN LCI RTAGS..TIBETAN 
SIGN YANG RTAGS + {0x0F8D, 0x0F97, prExtend}, // Mn [11] TIBETAN SUBJOINED SIGN LCE TSA CAN..TIBETAN SUBJOINED LETTER JA + {0x0F99, 0x0FBC, prExtend}, // Mn [36] TIBETAN SUBJOINED LETTER NYA..TIBETAN SUBJOINED LETTER FIXED-FORM RA + {0x0FC6, 0x0FC6, prExtend}, // Mn TIBETAN SYMBOL PADMA GDAN + {0x102D, 0x1030, prExtend}, // Mn [4] MYANMAR VOWEL SIGN I..MYANMAR VOWEL SIGN UU + {0x1031, 0x1031, prSpacingMark}, // Mc MYANMAR VOWEL SIGN E + {0x1032, 0x1037, prExtend}, // Mn [6] MYANMAR VOWEL SIGN AI..MYANMAR SIGN DOT BELOW + {0x1039, 0x103A, prExtend}, // Mn [2] MYANMAR SIGN VIRAMA..MYANMAR SIGN ASAT + {0x103B, 0x103C, prSpacingMark}, // Mc [2] MYANMAR CONSONANT SIGN MEDIAL YA..MYANMAR CONSONANT SIGN MEDIAL RA + {0x103D, 0x103E, prExtend}, // Mn [2] MYANMAR CONSONANT SIGN MEDIAL WA..MYANMAR CONSONANT SIGN MEDIAL HA + {0x1056, 0x1057, prSpacingMark}, // Mc [2] MYANMAR VOWEL SIGN VOCALIC R..MYANMAR VOWEL SIGN VOCALIC RR + {0x1058, 0x1059, prExtend}, // Mn [2] MYANMAR VOWEL SIGN VOCALIC L..MYANMAR VOWEL SIGN VOCALIC LL + {0x105E, 0x1060, prExtend}, // Mn [3] MYANMAR CONSONANT SIGN MON MEDIAL NA..MYANMAR CONSONANT SIGN MON MEDIAL LA + {0x1071, 0x1074, prExtend}, // Mn [4] MYANMAR VOWEL SIGN GEBA KAREN I..MYANMAR VOWEL SIGN KAYAH EE + {0x1082, 0x1082, prExtend}, // Mn MYANMAR CONSONANT SIGN SHAN MEDIAL WA + {0x1084, 0x1084, prSpacingMark}, // Mc MYANMAR VOWEL SIGN SHAN E + {0x1085, 0x1086, prExtend}, // Mn [2] MYANMAR VOWEL SIGN SHAN E ABOVE..MYANMAR VOWEL SIGN SHAN FINAL Y + {0x108D, 0x108D, prExtend}, // Mn MYANMAR SIGN SHAN COUNCIL EMPHATIC TONE + {0x109D, 0x109D, prExtend}, // Mn MYANMAR VOWEL SIGN AITON AI + {0x1100, 0x115F, prL}, // Lo [96] HANGUL CHOSEONG KIYEOK..HANGUL CHOSEONG FILLER + {0x1160, 0x11A7, prV}, // Lo [72] HANGUL JUNGSEONG FILLER..HANGUL JUNGSEONG O-YAE + {0x11A8, 0x11FF, prT}, // Lo [88] HANGUL JONGSEONG KIYEOK..HANGUL JONGSEONG SSANGNIEUN + {0x135D, 0x135F, prExtend}, // Mn [3] ETHIOPIC COMBINING GEMINATION AND VOWEL LENGTH MARK..ETHIOPIC COMBINING GEMINATION MARK + {0x1712, 0x1714, prExtend}, // Mn [3] TAGALOG VOWEL SIGN I..TAGALOG SIGN VIRAMA + {0x1732, 0x1734, prExtend}, // Mn [3] HANUNOO VOWEL SIGN I..HANUNOO SIGN PAMUDPOD + {0x1752, 0x1753, prExtend}, // Mn [2] BUHID VOWEL SIGN I..BUHID VOWEL SIGN U + {0x1772, 0x1773, prExtend}, // Mn [2] TAGBANWA VOWEL SIGN I..TAGBANWA VOWEL SIGN U + {0x17B4, 0x17B5, prExtend}, // Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA + {0x17B6, 0x17B6, prSpacingMark}, // Mc KHMER VOWEL SIGN AA + {0x17B7, 0x17BD, prExtend}, // Mn [7] KHMER VOWEL SIGN I..KHMER VOWEL SIGN UA + {0x17BE, 0x17C5, prSpacingMark}, // Mc [8] KHMER VOWEL SIGN OE..KHMER VOWEL SIGN AU + {0x17C6, 0x17C6, prExtend}, // Mn KHMER SIGN NIKAHIT + {0x17C7, 0x17C8, prSpacingMark}, // Mc [2] KHMER SIGN REAHMUK..KHMER SIGN YUUKALEAPINTU + {0x17C9, 0x17D3, prExtend}, // Mn [11] KHMER SIGN MUUSIKATOAN..KHMER SIGN BATHAMASAT + {0x17DD, 0x17DD, prExtend}, // Mn KHMER SIGN ATTHACAN + {0x180B, 0x180D, prExtend}, // Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE + {0x180E, 0x180E, prControl}, // Cf MONGOLIAN VOWEL SEPARATOR + {0x1885, 0x1886, prExtend}, // Mn [2] MONGOLIAN LETTER ALI GALI BALUDA..MONGOLIAN LETTER ALI GALI THREE BALUDA + {0x18A9, 0x18A9, prExtend}, // Mn MONGOLIAN LETTER ALI GALI DAGALGA + {0x1920, 0x1922, prExtend}, // Mn [3] LIMBU VOWEL SIGN A..LIMBU VOWEL SIGN U + {0x1923, 0x1926, prSpacingMark}, // Mc [4] LIMBU VOWEL SIGN EE..LIMBU VOWEL SIGN AU + {0x1927, 0x1928, prExtend}, // Mn [2] LIMBU VOWEL SIGN E..LIMBU VOWEL SIGN 
O + {0x1929, 0x192B, prSpacingMark}, // Mc [3] LIMBU SUBJOINED LETTER YA..LIMBU SUBJOINED LETTER WA + {0x1930, 0x1931, prSpacingMark}, // Mc [2] LIMBU SMALL LETTER KA..LIMBU SMALL LETTER NGA + {0x1932, 0x1932, prExtend}, // Mn LIMBU SMALL LETTER ANUSVARA + {0x1933, 0x1938, prSpacingMark}, // Mc [6] LIMBU SMALL LETTER TA..LIMBU SMALL LETTER LA + {0x1939, 0x193B, prExtend}, // Mn [3] LIMBU SIGN MUKPHRENG..LIMBU SIGN SA-I + {0x1A17, 0x1A18, prExtend}, // Mn [2] BUGINESE VOWEL SIGN I..BUGINESE VOWEL SIGN U + {0x1A19, 0x1A1A, prSpacingMark}, // Mc [2] BUGINESE VOWEL SIGN E..BUGINESE VOWEL SIGN O + {0x1A1B, 0x1A1B, prExtend}, // Mn BUGINESE VOWEL SIGN AE + {0x1A55, 0x1A55, prSpacingMark}, // Mc TAI THAM CONSONANT SIGN MEDIAL RA + {0x1A56, 0x1A56, prExtend}, // Mn TAI THAM CONSONANT SIGN MEDIAL LA + {0x1A57, 0x1A57, prSpacingMark}, // Mc TAI THAM CONSONANT SIGN LA TANG LAI + {0x1A58, 0x1A5E, prExtend}, // Mn [7] TAI THAM SIGN MAI KANG LAI..TAI THAM CONSONANT SIGN SA + {0x1A60, 0x1A60, prExtend}, // Mn TAI THAM SIGN SAKOT + {0x1A62, 0x1A62, prExtend}, // Mn TAI THAM VOWEL SIGN MAI SAT + {0x1A65, 0x1A6C, prExtend}, // Mn [8] TAI THAM VOWEL SIGN I..TAI THAM VOWEL SIGN OA BELOW + {0x1A6D, 0x1A72, prSpacingMark}, // Mc [6] TAI THAM VOWEL SIGN OY..TAI THAM VOWEL SIGN THAM AI + {0x1A73, 0x1A7C, prExtend}, // Mn [10] TAI THAM VOWEL SIGN OA ABOVE..TAI THAM SIGN KHUEN-LUE KARAN + {0x1A7F, 0x1A7F, prExtend}, // Mn TAI THAM COMBINING CRYPTOGRAMMIC DOT + {0x1AB0, 0x1ABD, prExtend}, // Mn [14] COMBINING DOUBLED CIRCUMFLEX ACCENT..COMBINING PARENTHESES BELOW + {0x1ABE, 0x1ABE, prExtend}, // Me COMBINING PARENTHESES OVERLAY + {0x1B00, 0x1B03, prExtend}, // Mn [4] BALINESE SIGN ULU RICEM..BALINESE SIGN SURANG + {0x1B04, 0x1B04, prSpacingMark}, // Mc BALINESE SIGN BISAH + {0x1B34, 0x1B34, prExtend}, // Mn BALINESE SIGN REREKAN + {0x1B35, 0x1B35, prExtend}, // Mc BALINESE VOWEL SIGN TEDUNG + {0x1B36, 0x1B3A, prExtend}, // Mn [5] BALINESE VOWEL SIGN ULU..BALINESE VOWEL SIGN RA REPA + {0x1B3B, 0x1B3B, prSpacingMark}, // Mc BALINESE VOWEL SIGN RA REPA TEDUNG + {0x1B3C, 0x1B3C, prExtend}, // Mn BALINESE VOWEL SIGN LA LENGA + {0x1B3D, 0x1B41, prSpacingMark}, // Mc [5] BALINESE VOWEL SIGN LA LENGA TEDUNG..BALINESE VOWEL SIGN TALING REPA TEDUNG + {0x1B42, 0x1B42, prExtend}, // Mn BALINESE VOWEL SIGN PEPET + {0x1B43, 0x1B44, prSpacingMark}, // Mc [2] BALINESE VOWEL SIGN PEPET TEDUNG..BALINESE ADEG ADEG + {0x1B6B, 0x1B73, prExtend}, // Mn [9] BALINESE MUSICAL SYMBOL COMBINING TEGEH..BALINESE MUSICAL SYMBOL COMBINING GONG + {0x1B80, 0x1B81, prExtend}, // Mn [2] SUNDANESE SIGN PANYECEK..SUNDANESE SIGN PANGLAYAR + {0x1B82, 0x1B82, prSpacingMark}, // Mc SUNDANESE SIGN PANGWISAD + {0x1BA1, 0x1BA1, prSpacingMark}, // Mc SUNDANESE CONSONANT SIGN PAMINGKAL + {0x1BA2, 0x1BA5, prExtend}, // Mn [4] SUNDANESE CONSONANT SIGN PANYAKRA..SUNDANESE VOWEL SIGN PANYUKU + {0x1BA6, 0x1BA7, prSpacingMark}, // Mc [2] SUNDANESE VOWEL SIGN PANAELAENG..SUNDANESE VOWEL SIGN PANOLONG + {0x1BA8, 0x1BA9, prExtend}, // Mn [2] SUNDANESE VOWEL SIGN PAMEPET..SUNDANESE VOWEL SIGN PANEULEUNG + {0x1BAA, 0x1BAA, prSpacingMark}, // Mc SUNDANESE SIGN PAMAAEH + {0x1BAB, 0x1BAD, prExtend}, // Mn [3] SUNDANESE SIGN VIRAMA..SUNDANESE CONSONANT SIGN PASANGAN WA + {0x1BE6, 0x1BE6, prExtend}, // Mn BATAK SIGN TOMPI + {0x1BE7, 0x1BE7, prSpacingMark}, // Mc BATAK VOWEL SIGN E + {0x1BE8, 0x1BE9, prExtend}, // Mn [2] BATAK VOWEL SIGN PAKPAK E..BATAK VOWEL SIGN EE + {0x1BEA, 0x1BEC, prSpacingMark}, // Mc [3] BATAK VOWEL SIGN I..BATAK VOWEL SIGN O + {0x1BED, 0x1BED, 
prExtend}, // Mn BATAK VOWEL SIGN KARO O + {0x1BEE, 0x1BEE, prSpacingMark}, // Mc BATAK VOWEL SIGN U + {0x1BEF, 0x1BF1, prExtend}, // Mn [3] BATAK VOWEL SIGN U FOR SIMALUNGUN SA..BATAK CONSONANT SIGN H + {0x1BF2, 0x1BF3, prSpacingMark}, // Mc [2] BATAK PANGOLAT..BATAK PANONGONAN + {0x1C24, 0x1C2B, prSpacingMark}, // Mc [8] LEPCHA SUBJOINED LETTER YA..LEPCHA VOWEL SIGN UU + {0x1C2C, 0x1C33, prExtend}, // Mn [8] LEPCHA VOWEL SIGN E..LEPCHA CONSONANT SIGN T + {0x1C34, 0x1C35, prSpacingMark}, // Mc [2] LEPCHA CONSONANT SIGN NYIN-DO..LEPCHA CONSONANT SIGN KANG + {0x1C36, 0x1C37, prExtend}, // Mn [2] LEPCHA SIGN RAN..LEPCHA SIGN NUKTA + {0x1CD0, 0x1CD2, prExtend}, // Mn [3] VEDIC TONE KARSHANA..VEDIC TONE PRENKHA + {0x1CD4, 0x1CE0, prExtend}, // Mn [13] VEDIC SIGN YAJURVEDIC MIDLINE SVARITA..VEDIC TONE RIGVEDIC KASHMIRI INDEPENDENT SVARITA + {0x1CE1, 0x1CE1, prSpacingMark}, // Mc VEDIC TONE ATHARVAVEDIC INDEPENDENT SVARITA + {0x1CE2, 0x1CE8, prExtend}, // Mn [7] VEDIC SIGN VISARGA SVARITA..VEDIC SIGN VISARGA ANUDATTA WITH TAIL + {0x1CED, 0x1CED, prExtend}, // Mn VEDIC SIGN TIRYAK + {0x1CF4, 0x1CF4, prExtend}, // Mn VEDIC TONE CANDRA ABOVE + {0x1CF7, 0x1CF7, prSpacingMark}, // Mc VEDIC SIGN ATIKRAMA + {0x1CF8, 0x1CF9, prExtend}, // Mn [2] VEDIC TONE RING ABOVE..VEDIC TONE DOUBLE RING ABOVE + {0x1DC0, 0x1DF9, prExtend}, // Mn [58] COMBINING DOTTED GRAVE ACCENT..COMBINING WIDE INVERTED BRIDGE BELOW + {0x1DFB, 0x1DFF, prExtend}, // Mn [5] COMBINING DELETION MARK..COMBINING RIGHT ARROWHEAD AND DOWN ARROWHEAD BELOW + {0x200B, 0x200B, prControl}, // Cf ZERO WIDTH SPACE + {0x200C, 0x200C, prExtend}, // Cf ZERO WIDTH NON-JOINER + {0x200D, 0x200D, prZWJ}, // Cf ZERO WIDTH JOINER + {0x200E, 0x200F, prControl}, // Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT MARK + {0x2028, 0x2028, prControl}, // Zl LINE SEPARATOR + {0x2029, 0x2029, prControl}, // Zp PARAGRAPH SEPARATOR + {0x202A, 0x202E, prControl}, // Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE + {0x203C, 0x203C, prExtendedPictographic}, // 1.1 [1] (‼️) double exclamation mark + {0x2049, 0x2049, prExtendedPictographic}, // 3.0 [1] (⁉️) exclamation question mark + {0x2060, 0x2064, prControl}, // Cf [5] WORD JOINER..INVISIBLE PLUS + {0x2065, 0x2065, prControl}, // Cn + {0x2066, 0x206F, prControl}, // Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES + {0x20D0, 0x20DC, prExtend}, // Mn [13] COMBINING LEFT HARPOON ABOVE..COMBINING FOUR DOTS ABOVE + {0x20DD, 0x20E0, prExtend}, // Me [4] COMBINING ENCLOSING CIRCLE..COMBINING ENCLOSING CIRCLE BACKSLASH + {0x20E1, 0x20E1, prExtend}, // Mn COMBINING LEFT RIGHT ARROW ABOVE + {0x20E2, 0x20E4, prExtend}, // Me [3] COMBINING ENCLOSING SCREEN..COMBINING ENCLOSING UPWARD POINTING TRIANGLE + {0x20E5, 0x20F0, prExtend}, // Mn [12] COMBINING REVERSE SOLIDUS OVERLAY..COMBINING ASTERISK ABOVE + {0x2122, 0x2122, prExtendedPictographic}, // 1.1 [1] (™️) trade mark + {0x2139, 0x2139, prExtendedPictographic}, // 3.0 [1] (ℹ️) information + {0x2194, 0x2199, prExtendedPictographic}, // 1.1 [6] (↔️..↙️) left-right arrow..down-left arrow + {0x21A9, 0x21AA, prExtendedPictographic}, // 1.1 [2] (↩️..↪️) right arrow curving left..left arrow curving right + {0x231A, 0x231B, prExtendedPictographic}, // 1.1 [2] (⌚..⌛) watch..hourglass done + {0x2328, 0x2328, prExtendedPictographic}, // 1.1 [1] (⌨️) keyboard + {0x2388, 0x2388, prExtendedPictographic}, // 3.0 [1] (⎈) HELM SYMBOL + {0x23CF, 0x23CF, prExtendedPictographic}, // 4.0 [1] (⏏️) eject button + {0x23E9, 0x23F3, prExtendedPictographic}, // 6.0 [11] (⏩..⏳) fast-forward 
button..hourglass not done + {0x23F8, 0x23FA, prExtendedPictographic}, // 7.0 [3] (⏸️..⏺️) pause button..record button + {0x24C2, 0x24C2, prExtendedPictographic}, // 1.1 [1] (Ⓜ️) circled M + {0x25AA, 0x25AB, prExtendedPictographic}, // 1.1 [2] (▪️..▫️) black small square..white small square + {0x25B6, 0x25B6, prExtendedPictographic}, // 1.1 [1] (▶️) play button + {0x25C0, 0x25C0, prExtendedPictographic}, // 1.1 [1] (◀️) reverse button + {0x25FB, 0x25FE, prExtendedPictographic}, // 3.2 [4] (◻️..◾) white medium square..black medium-small square + {0x2600, 0x2605, prExtendedPictographic}, // 1.1 [6] (☀️..★) sun..BLACK STAR + {0x2607, 0x2612, prExtendedPictographic}, // 1.1 [12] (☇..☒) LIGHTNING..BALLOT BOX WITH X + {0x2614, 0x2615, prExtendedPictographic}, // 4.0 [2] (☔..☕) umbrella with rain drops..hot beverage + {0x2616, 0x2617, prExtendedPictographic}, // 3.2 [2] (☖..☗) WHITE SHOGI PIECE..BLACK SHOGI PIECE + {0x2618, 0x2618, prExtendedPictographic}, // 4.1 [1] (☘️) shamrock + {0x2619, 0x2619, prExtendedPictographic}, // 3.0 [1] (☙) REVERSED ROTATED FLORAL HEART BULLET + {0x261A, 0x266F, prExtendedPictographic}, // 1.1 [86] (☚..♯) BLACK LEFT POINTING INDEX..MUSIC SHARP SIGN + {0x2670, 0x2671, prExtendedPictographic}, // 3.0 [2] (♰..♱) WEST SYRIAC CROSS..EAST SYRIAC CROSS + {0x2672, 0x267D, prExtendedPictographic}, // 3.2 [12] (♲..♽) UNIVERSAL RECYCLING SYMBOL..PARTIALLY-RECYCLED PAPER SYMBOL + {0x267E, 0x267F, prExtendedPictographic}, // 4.1 [2] (♾️..♿) infinity..wheelchair symbol + {0x2680, 0x2685, prExtendedPictographic}, // 3.2 [6] (⚀..⚅) DIE FACE-1..DIE FACE-6 + {0x2690, 0x2691, prExtendedPictographic}, // 4.0 [2] (⚐..⚑) WHITE FLAG..BLACK FLAG + {0x2692, 0x269C, prExtendedPictographic}, // 4.1 [11] (⚒️..⚜️) hammer and pick..fleur-de-lis + {0x269D, 0x269D, prExtendedPictographic}, // 5.1 [1] (⚝) OUTLINED WHITE STAR + {0x269E, 0x269F, prExtendedPictographic}, // 5.2 [2] (⚞..⚟) THREE LINES CONVERGING RIGHT..THREE LINES CONVERGING LEFT + {0x26A0, 0x26A1, prExtendedPictographic}, // 4.0 [2] (⚠️..⚡) warning..high voltage + {0x26A2, 0x26B1, prExtendedPictographic}, // 4.1 [16] (⚢..⚱️) DOUBLED FEMALE SIGN..funeral urn + {0x26B2, 0x26B2, prExtendedPictographic}, // 5.0 [1] (⚲) NEUTER + {0x26B3, 0x26BC, prExtendedPictographic}, // 5.1 [10] (⚳..⚼) CERES..SESQUIQUADRATE + {0x26BD, 0x26BF, prExtendedPictographic}, // 5.2 [3] (⚽..⚿) soccer ball..SQUARED KEY + {0x26C0, 0x26C3, prExtendedPictographic}, // 5.1 [4] (⛀..⛃) WHITE DRAUGHTS MAN..BLACK DRAUGHTS KING + {0x26C4, 0x26CD, prExtendedPictographic}, // 5.2 [10] (⛄..⛍) snowman without snow..DISABLED CAR + {0x26CE, 0x26CE, prExtendedPictographic}, // 6.0 [1] (⛎) Ophiuchus + {0x26CF, 0x26E1, prExtendedPictographic}, // 5.2 [19] (⛏️..⛡) pick..RESTRICTED LEFT ENTRY-2 + {0x26E2, 0x26E2, prExtendedPictographic}, // 6.0 [1] (⛢) ASTRONOMICAL SYMBOL FOR URANUS + {0x26E3, 0x26E3, prExtendedPictographic}, // 5.2 [1] (⛣) HEAVY CIRCLE WITH STROKE AND TWO DOTS ABOVE + {0x26E4, 0x26E7, prExtendedPictographic}, // 6.0 [4] (⛤..⛧) PENTAGRAM..INVERTED PENTAGRAM + {0x26E8, 0x26FF, prExtendedPictographic}, // 5.2 [24] (⛨..⛿) BLACK CROSS ON SHIELD..WHITE FLAG WITH HORIZONTAL MIDDLE BLACK STRIPE + {0x2700, 0x2700, prExtendedPictographic}, // 7.0 [1] (✀) BLACK SAFETY SCISSORS + {0x2701, 0x2704, prExtendedPictographic}, // 1.1 [4] (✁..✄) UPPER BLADE SCISSORS..WHITE SCISSORS + {0x2705, 0x2705, prExtendedPictographic}, // 6.0 [1] (✅) check mark button + {0x2708, 0x2709, prExtendedPictographic}, // 1.1 [2] (✈️..✉️) airplane..envelope + {0x270A, 0x270B, prExtendedPictographic}, 
// 6.0 [2] (✊..✋) raised fist..raised hand + {0x270C, 0x2712, prExtendedPictographic}, // 1.1 [7] (✌️..✒️) victory hand..black nib + {0x2714, 0x2714, prExtendedPictographic}, // 1.1 [1] (✔️) check mark + {0x2716, 0x2716, prExtendedPictographic}, // 1.1 [1] (✖️) multiplication sign + {0x271D, 0x271D, prExtendedPictographic}, // 1.1 [1] (✝️) latin cross + {0x2721, 0x2721, prExtendedPictographic}, // 1.1 [1] (✡️) star of David + {0x2728, 0x2728, prExtendedPictographic}, // 6.0 [1] (✨) sparkles + {0x2733, 0x2734, prExtendedPictographic}, // 1.1 [2] (✳️..✴️) eight-spoked asterisk..eight-pointed star + {0x2744, 0x2744, prExtendedPictographic}, // 1.1 [1] (❄️) snowflake + {0x2747, 0x2747, prExtendedPictographic}, // 1.1 [1] (❇️) sparkle + {0x274C, 0x274C, prExtendedPictographic}, // 6.0 [1] (❌) cross mark + {0x274E, 0x274E, prExtendedPictographic}, // 6.0 [1] (❎) cross mark button + {0x2753, 0x2755, prExtendedPictographic}, // 6.0 [3] (❓..❕) question mark..white exclamation mark + {0x2757, 0x2757, prExtendedPictographic}, // 5.2 [1] (❗) exclamation mark + {0x2763, 0x2767, prExtendedPictographic}, // 1.1 [5] (❣️..❧) heart exclamation..ROTATED FLORAL HEART BULLET + {0x2795, 0x2797, prExtendedPictographic}, // 6.0 [3] (➕..➗) plus sign..division sign + {0x27A1, 0x27A1, prExtendedPictographic}, // 1.1 [1] (➡️) right arrow + {0x27B0, 0x27B0, prExtendedPictographic}, // 6.0 [1] (➰) curly loop + {0x27BF, 0x27BF, prExtendedPictographic}, // 6.0 [1] (➿) double curly loop + {0x2934, 0x2935, prExtendedPictographic}, // 3.2 [2] (⤴️..⤵️) right arrow curving up..right arrow curving down + {0x2B05, 0x2B07, prExtendedPictographic}, // 4.0 [3] (⬅️..⬇️) left arrow..down arrow + {0x2B1B, 0x2B1C, prExtendedPictographic}, // 5.1 [2] (⬛..⬜) black large square..white large square + {0x2B50, 0x2B50, prExtendedPictographic}, // 5.1 [1] (⭐) star + {0x2B55, 0x2B55, prExtendedPictographic}, // 5.2 [1] (⭕) hollow red circle + {0x2CEF, 0x2CF1, prExtend}, // Mn [3] COPTIC COMBINING NI ABOVE..COPTIC COMBINING SPIRITUS LENIS + {0x2D7F, 0x2D7F, prExtend}, // Mn TIFINAGH CONSONANT JOINER + {0x2DE0, 0x2DFF, prExtend}, // Mn [32] COMBINING CYRILLIC LETTER BE..COMBINING CYRILLIC LETTER IOTIFIED BIG YUS + {0x302A, 0x302D, prExtend}, // Mn [4] IDEOGRAPHIC LEVEL TONE MARK..IDEOGRAPHIC ENTERING TONE MARK + {0x302E, 0x302F, prExtend}, // Mc [2] HANGUL SINGLE DOT TONE MARK..HANGUL DOUBLE DOT TONE MARK + {0x3030, 0x3030, prExtendedPictographic}, // 1.1 [1] (〰️) wavy dash + {0x303D, 0x303D, prExtendedPictographic}, // 3.2 [1] (〽️) part alternation mark + {0x3099, 0x309A, prExtend}, // Mn [2] COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK..COMBINING KATAKANA-HIRAGANA SEMI-VOICED SOUND MARK + {0x3297, 0x3297, prExtendedPictographic}, // 1.1 [1] (㊗️) Japanese “congratulations” button + {0x3299, 0x3299, prExtendedPictographic}, // 1.1 [1] (㊙️) Japanese “secret” button + {0xA66F, 0xA66F, prExtend}, // Mn COMBINING CYRILLIC VZMET + {0xA670, 0xA672, prExtend}, // Me [3] COMBINING CYRILLIC TEN MILLIONS SIGN..COMBINING CYRILLIC THOUSAND MILLIONS SIGN + {0xA674, 0xA67D, prExtend}, // Mn [10] COMBINING CYRILLIC LETTER UKRAINIAN IE..COMBINING CYRILLIC PAYEROK + {0xA69E, 0xA69F, prExtend}, // Mn [2] COMBINING CYRILLIC LETTER EF..COMBINING CYRILLIC LETTER IOTIFIED E + {0xA6F0, 0xA6F1, prExtend}, // Mn [2] BAMUM COMBINING MARK KOQNDON..BAMUM COMBINING MARK TUKWENTIS + {0xA802, 0xA802, prExtend}, // Mn SYLOTI NAGRI SIGN DVISVARA + {0xA806, 0xA806, prExtend}, // Mn SYLOTI NAGRI SIGN HASANTA + {0xA80B, 0xA80B, prExtend}, // Mn SYLOTI NAGRI SIGN ANUSVARA + 
{0xA823, 0xA824, prSpacingMark}, // Mc [2] SYLOTI NAGRI VOWEL SIGN A..SYLOTI NAGRI VOWEL SIGN I + {0xA825, 0xA826, prExtend}, // Mn [2] SYLOTI NAGRI VOWEL SIGN U..SYLOTI NAGRI VOWEL SIGN E + {0xA827, 0xA827, prSpacingMark}, // Mc SYLOTI NAGRI VOWEL SIGN OO + {0xA880, 0xA881, prSpacingMark}, // Mc [2] SAURASHTRA SIGN ANUSVARA..SAURASHTRA SIGN VISARGA + {0xA8B4, 0xA8C3, prSpacingMark}, // Mc [16] SAURASHTRA CONSONANT SIGN HAARU..SAURASHTRA VOWEL SIGN AU + {0xA8C4, 0xA8C5, prExtend}, // Mn [2] SAURASHTRA SIGN VIRAMA..SAURASHTRA SIGN CANDRABINDU + {0xA8E0, 0xA8F1, prExtend}, // Mn [18] COMBINING DEVANAGARI DIGIT ZERO..COMBINING DEVANAGARI SIGN AVAGRAHA + {0xA8FF, 0xA8FF, prExtend}, // Mn DEVANAGARI VOWEL SIGN AY + {0xA926, 0xA92D, prExtend}, // Mn [8] KAYAH LI VOWEL UE..KAYAH LI TONE CALYA PLOPHU + {0xA947, 0xA951, prExtend}, // Mn [11] REJANG VOWEL SIGN I..REJANG CONSONANT SIGN R + {0xA952, 0xA953, prSpacingMark}, // Mc [2] REJANG CONSONANT SIGN H..REJANG VIRAMA + {0xA960, 0xA97C, prL}, // Lo [29] HANGUL CHOSEONG TIKEUT-MIEUM..HANGUL CHOSEONG SSANGYEORINHIEUH + {0xA980, 0xA982, prExtend}, // Mn [3] JAVANESE SIGN PANYANGGA..JAVANESE SIGN LAYAR + {0xA983, 0xA983, prSpacingMark}, // Mc JAVANESE SIGN WIGNYAN + {0xA9B3, 0xA9B3, prExtend}, // Mn JAVANESE SIGN CECAK TELU + {0xA9B4, 0xA9B5, prSpacingMark}, // Mc [2] JAVANESE VOWEL SIGN TARUNG..JAVANESE VOWEL SIGN TOLONG + {0xA9B6, 0xA9B9, prExtend}, // Mn [4] JAVANESE VOWEL SIGN WULU..JAVANESE VOWEL SIGN SUKU MENDUT + {0xA9BA, 0xA9BB, prSpacingMark}, // Mc [2] JAVANESE VOWEL SIGN TALING..JAVANESE VOWEL SIGN DIRGA MURE + {0xA9BC, 0xA9BD, prExtend}, // Mn [2] JAVANESE VOWEL SIGN PEPET..JAVANESE CONSONANT SIGN KERET + {0xA9BE, 0xA9C0, prSpacingMark}, // Mc [3] JAVANESE CONSONANT SIGN PENGKAL..JAVANESE PANGKON + {0xA9E5, 0xA9E5, prExtend}, // Mn MYANMAR SIGN SHAN SAW + {0xAA29, 0xAA2E, prExtend}, // Mn [6] CHAM VOWEL SIGN AA..CHAM VOWEL SIGN OE + {0xAA2F, 0xAA30, prSpacingMark}, // Mc [2] CHAM VOWEL SIGN O..CHAM VOWEL SIGN AI + {0xAA31, 0xAA32, prExtend}, // Mn [2] CHAM VOWEL SIGN AU..CHAM VOWEL SIGN UE + {0xAA33, 0xAA34, prSpacingMark}, // Mc [2] CHAM CONSONANT SIGN YA..CHAM CONSONANT SIGN RA + {0xAA35, 0xAA36, prExtend}, // Mn [2] CHAM CONSONANT SIGN LA..CHAM CONSONANT SIGN WA + {0xAA43, 0xAA43, prExtend}, // Mn CHAM CONSONANT SIGN FINAL NG + {0xAA4C, 0xAA4C, prExtend}, // Mn CHAM CONSONANT SIGN FINAL M + {0xAA4D, 0xAA4D, prSpacingMark}, // Mc CHAM CONSONANT SIGN FINAL H + {0xAA7C, 0xAA7C, prExtend}, // Mn MYANMAR SIGN TAI LAING TONE-2 + {0xAAB0, 0xAAB0, prExtend}, // Mn TAI VIET MAI KANG + {0xAAB2, 0xAAB4, prExtend}, // Mn [3] TAI VIET VOWEL I..TAI VIET VOWEL U + {0xAAB7, 0xAAB8, prExtend}, // Mn [2] TAI VIET MAI KHIT..TAI VIET VOWEL IA + {0xAABE, 0xAABF, prExtend}, // Mn [2] TAI VIET VOWEL AM..TAI VIET TONE MAI EK + {0xAAC1, 0xAAC1, prExtend}, // Mn TAI VIET TONE MAI THO + {0xAAEB, 0xAAEB, prSpacingMark}, // Mc MEETEI MAYEK VOWEL SIGN II + {0xAAEC, 0xAAED, prExtend}, // Mn [2] MEETEI MAYEK VOWEL SIGN UU..MEETEI MAYEK VOWEL SIGN AAI + {0xAAEE, 0xAAEF, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN AU..MEETEI MAYEK VOWEL SIGN AAU + {0xAAF5, 0xAAF5, prSpacingMark}, // Mc MEETEI MAYEK VOWEL SIGN VISARGA + {0xAAF6, 0xAAF6, prExtend}, // Mn MEETEI MAYEK VIRAMA + {0xABE3, 0xABE4, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN ONAP..MEETEI MAYEK VOWEL SIGN INAP + {0xABE5, 0xABE5, prExtend}, // Mn MEETEI MAYEK VOWEL SIGN ANAP + {0xABE6, 0xABE7, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN YENAP..MEETEI MAYEK VOWEL SIGN SOUNAP + {0xABE8, 0xABE8, 
prExtend}, // Mn MEETEI MAYEK VOWEL SIGN UNAP + {0xABE9, 0xABEA, prSpacingMark}, // Mc [2] MEETEI MAYEK VOWEL SIGN CHEINAP..MEETEI MAYEK VOWEL SIGN NUNG + {0xABEC, 0xABEC, prSpacingMark}, // Mc MEETEI MAYEK LUM IYEK + {0xABED, 0xABED, prExtend}, // Mn MEETEI MAYEK APUN IYEK + {0xAC00, 0xAC00, prLV}, // Lo HANGUL SYLLABLE GA + {0xAC01, 0xAC1B, prLVT}, // Lo [27] HANGUL SYLLABLE GAG..HANGUL SYLLABLE GAH + {0xAC1C, 0xAC1C, prLV}, // Lo HANGUL SYLLABLE GAE + {0xAC1D, 0xAC37, prLVT}, // Lo [27] HANGUL SYLLABLE GAEG..HANGUL SYLLABLE GAEH + {0xAC38, 0xAC38, prLV}, // Lo HANGUL SYLLABLE GYA + {0xAC39, 0xAC53, prLVT}, // Lo [27] HANGUL SYLLABLE GYAG..HANGUL SYLLABLE GYAH + {0xAC54, 0xAC54, prLV}, // Lo HANGUL SYLLABLE GYAE + {0xAC55, 0xAC6F, prLVT}, // Lo [27] HANGUL SYLLABLE GYAEG..HANGUL SYLLABLE GYAEH + {0xAC70, 0xAC70, prLV}, // Lo HANGUL SYLLABLE GEO + {0xAC71, 0xAC8B, prLVT}, // Lo [27] HANGUL SYLLABLE GEOG..HANGUL SYLLABLE GEOH + {0xAC8C, 0xAC8C, prLV}, // Lo HANGUL SYLLABLE GE + {0xAC8D, 0xACA7, prLVT}, // Lo [27] HANGUL SYLLABLE GEG..HANGUL SYLLABLE GEH + {0xACA8, 0xACA8, prLV}, // Lo HANGUL SYLLABLE GYEO + {0xACA9, 0xACC3, prLVT}, // Lo [27] HANGUL SYLLABLE GYEOG..HANGUL SYLLABLE GYEOH + {0xACC4, 0xACC4, prLV}, // Lo HANGUL SYLLABLE GYE + {0xACC5, 0xACDF, prLVT}, // Lo [27] HANGUL SYLLABLE GYEG..HANGUL SYLLABLE GYEH + {0xACE0, 0xACE0, prLV}, // Lo HANGUL SYLLABLE GO + {0xACE1, 0xACFB, prLVT}, // Lo [27] HANGUL SYLLABLE GOG..HANGUL SYLLABLE GOH + {0xACFC, 0xACFC, prLV}, // Lo HANGUL SYLLABLE GWA + {0xACFD, 0xAD17, prLVT}, // Lo [27] HANGUL SYLLABLE GWAG..HANGUL SYLLABLE GWAH + {0xAD18, 0xAD18, prLV}, // Lo HANGUL SYLLABLE GWAE + {0xAD19, 0xAD33, prLVT}, // Lo [27] HANGUL SYLLABLE GWAEG..HANGUL SYLLABLE GWAEH + {0xAD34, 0xAD34, prLV}, // Lo HANGUL SYLLABLE GOE + {0xAD35, 0xAD4F, prLVT}, // Lo [27] HANGUL SYLLABLE GOEG..HANGUL SYLLABLE GOEH + {0xAD50, 0xAD50, prLV}, // Lo HANGUL SYLLABLE GYO + {0xAD51, 0xAD6B, prLVT}, // Lo [27] HANGUL SYLLABLE GYOG..HANGUL SYLLABLE GYOH + {0xAD6C, 0xAD6C, prLV}, // Lo HANGUL SYLLABLE GU + {0xAD6D, 0xAD87, prLVT}, // Lo [27] HANGUL SYLLABLE GUG..HANGUL SYLLABLE GUH + {0xAD88, 0xAD88, prLV}, // Lo HANGUL SYLLABLE GWEO + {0xAD89, 0xADA3, prLVT}, // Lo [27] HANGUL SYLLABLE GWEOG..HANGUL SYLLABLE GWEOH + {0xADA4, 0xADA4, prLV}, // Lo HANGUL SYLLABLE GWE + {0xADA5, 0xADBF, prLVT}, // Lo [27] HANGUL SYLLABLE GWEG..HANGUL SYLLABLE GWEH + {0xADC0, 0xADC0, prLV}, // Lo HANGUL SYLLABLE GWI + {0xADC1, 0xADDB, prLVT}, // Lo [27] HANGUL SYLLABLE GWIG..HANGUL SYLLABLE GWIH + {0xADDC, 0xADDC, prLV}, // Lo HANGUL SYLLABLE GYU + {0xADDD, 0xADF7, prLVT}, // Lo [27] HANGUL SYLLABLE GYUG..HANGUL SYLLABLE GYUH + {0xADF8, 0xADF8, prLV}, // Lo HANGUL SYLLABLE GEU + {0xADF9, 0xAE13, prLVT}, // Lo [27] HANGUL SYLLABLE GEUG..HANGUL SYLLABLE GEUH + {0xAE14, 0xAE14, prLV}, // Lo HANGUL SYLLABLE GYI + {0xAE15, 0xAE2F, prLVT}, // Lo [27] HANGUL SYLLABLE GYIG..HANGUL SYLLABLE GYIH + {0xAE30, 0xAE30, prLV}, // Lo HANGUL SYLLABLE GI + {0xAE31, 0xAE4B, prLVT}, // Lo [27] HANGUL SYLLABLE GIG..HANGUL SYLLABLE GIH + {0xAE4C, 0xAE4C, prLV}, // Lo HANGUL SYLLABLE GGA + {0xAE4D, 0xAE67, prLVT}, // Lo [27] HANGUL SYLLABLE GGAG..HANGUL SYLLABLE GGAH + {0xAE68, 0xAE68, prLV}, // Lo HANGUL SYLLABLE GGAE + {0xAE69, 0xAE83, prLVT}, // Lo [27] HANGUL SYLLABLE GGAEG..HANGUL SYLLABLE GGAEH + {0xAE84, 0xAE84, prLV}, // Lo HANGUL SYLLABLE GGYA + {0xAE85, 0xAE9F, prLVT}, // Lo [27] HANGUL SYLLABLE GGYAG..HANGUL SYLLABLE GGYAH + {0xAEA0, 0xAEA0, prLV}, // Lo HANGUL SYLLABLE GGYAE + {0xAEA1, 0xAEBB, prLVT}, // 
Lo [27] HANGUL SYLLABLE GGYAEG..HANGUL SYLLABLE GGYAEH + {0xAEBC, 0xAEBC, prLV}, // Lo HANGUL SYLLABLE GGEO + {0xAEBD, 0xAED7, prLVT}, // Lo [27] HANGUL SYLLABLE GGEOG..HANGUL SYLLABLE GGEOH + {0xAED8, 0xAED8, prLV}, // Lo HANGUL SYLLABLE GGE + {0xAED9, 0xAEF3, prLVT}, // Lo [27] HANGUL SYLLABLE GGEG..HANGUL SYLLABLE GGEH + {0xAEF4, 0xAEF4, prLV}, // Lo HANGUL SYLLABLE GGYEO + {0xAEF5, 0xAF0F, prLVT}, // Lo [27] HANGUL SYLLABLE GGYEOG..HANGUL SYLLABLE GGYEOH + {0xAF10, 0xAF10, prLV}, // Lo HANGUL SYLLABLE GGYE + {0xAF11, 0xAF2B, prLVT}, // Lo [27] HANGUL SYLLABLE GGYEG..HANGUL SYLLABLE GGYEH + {0xAF2C, 0xAF2C, prLV}, // Lo HANGUL SYLLABLE GGO + {0xAF2D, 0xAF47, prLVT}, // Lo [27] HANGUL SYLLABLE GGOG..HANGUL SYLLABLE GGOH + {0xAF48, 0xAF48, prLV}, // Lo HANGUL SYLLABLE GGWA + {0xAF49, 0xAF63, prLVT}, // Lo [27] HANGUL SYLLABLE GGWAG..HANGUL SYLLABLE GGWAH + {0xAF64, 0xAF64, prLV}, // Lo HANGUL SYLLABLE GGWAE + {0xAF65, 0xAF7F, prLVT}, // Lo [27] HANGUL SYLLABLE GGWAEG..HANGUL SYLLABLE GGWAEH + {0xAF80, 0xAF80, prLV}, // Lo HANGUL SYLLABLE GGOE + {0xAF81, 0xAF9B, prLVT}, // Lo [27] HANGUL SYLLABLE GGOEG..HANGUL SYLLABLE GGOEH + {0xAF9C, 0xAF9C, prLV}, // Lo HANGUL SYLLABLE GGYO + {0xAF9D, 0xAFB7, prLVT}, // Lo [27] HANGUL SYLLABLE GGYOG..HANGUL SYLLABLE GGYOH + {0xAFB8, 0xAFB8, prLV}, // Lo HANGUL SYLLABLE GGU + {0xAFB9, 0xAFD3, prLVT}, // Lo [27] HANGUL SYLLABLE GGUG..HANGUL SYLLABLE GGUH + {0xAFD4, 0xAFD4, prLV}, // Lo HANGUL SYLLABLE GGWEO + {0xAFD5, 0xAFEF, prLVT}, // Lo [27] HANGUL SYLLABLE GGWEOG..HANGUL SYLLABLE GGWEOH + {0xAFF0, 0xAFF0, prLV}, // Lo HANGUL SYLLABLE GGWE + {0xAFF1, 0xB00B, prLVT}, // Lo [27] HANGUL SYLLABLE GGWEG..HANGUL SYLLABLE GGWEH + {0xB00C, 0xB00C, prLV}, // Lo HANGUL SYLLABLE GGWI + {0xB00D, 0xB027, prLVT}, // Lo [27] HANGUL SYLLABLE GGWIG..HANGUL SYLLABLE GGWIH + {0xB028, 0xB028, prLV}, // Lo HANGUL SYLLABLE GGYU + {0xB029, 0xB043, prLVT}, // Lo [27] HANGUL SYLLABLE GGYUG..HANGUL SYLLABLE GGYUH + {0xB044, 0xB044, prLV}, // Lo HANGUL SYLLABLE GGEU + {0xB045, 0xB05F, prLVT}, // Lo [27] HANGUL SYLLABLE GGEUG..HANGUL SYLLABLE GGEUH + {0xB060, 0xB060, prLV}, // Lo HANGUL SYLLABLE GGYI + {0xB061, 0xB07B, prLVT}, // Lo [27] HANGUL SYLLABLE GGYIG..HANGUL SYLLABLE GGYIH + {0xB07C, 0xB07C, prLV}, // Lo HANGUL SYLLABLE GGI + {0xB07D, 0xB097, prLVT}, // Lo [27] HANGUL SYLLABLE GGIG..HANGUL SYLLABLE GGIH + {0xB098, 0xB098, prLV}, // Lo HANGUL SYLLABLE NA + {0xB099, 0xB0B3, prLVT}, // Lo [27] HANGUL SYLLABLE NAG..HANGUL SYLLABLE NAH + {0xB0B4, 0xB0B4, prLV}, // Lo HANGUL SYLLABLE NAE + {0xB0B5, 0xB0CF, prLVT}, // Lo [27] HANGUL SYLLABLE NAEG..HANGUL SYLLABLE NAEH + {0xB0D0, 0xB0D0, prLV}, // Lo HANGUL SYLLABLE NYA + {0xB0D1, 0xB0EB, prLVT}, // Lo [27] HANGUL SYLLABLE NYAG..HANGUL SYLLABLE NYAH + {0xB0EC, 0xB0EC, prLV}, // Lo HANGUL SYLLABLE NYAE + {0xB0ED, 0xB107, prLVT}, // Lo [27] HANGUL SYLLABLE NYAEG..HANGUL SYLLABLE NYAEH + {0xB108, 0xB108, prLV}, // Lo HANGUL SYLLABLE NEO + {0xB109, 0xB123, prLVT}, // Lo [27] HANGUL SYLLABLE NEOG..HANGUL SYLLABLE NEOH + {0xB124, 0xB124, prLV}, // Lo HANGUL SYLLABLE NE + {0xB125, 0xB13F, prLVT}, // Lo [27] HANGUL SYLLABLE NEG..HANGUL SYLLABLE NEH + {0xB140, 0xB140, prLV}, // Lo HANGUL SYLLABLE NYEO + {0xB141, 0xB15B, prLVT}, // Lo [27] HANGUL SYLLABLE NYEOG..HANGUL SYLLABLE NYEOH + {0xB15C, 0xB15C, prLV}, // Lo HANGUL SYLLABLE NYE + {0xB15D, 0xB177, prLVT}, // Lo [27] HANGUL SYLLABLE NYEG..HANGUL SYLLABLE NYEH + {0xB178, 0xB178, prLV}, // Lo HANGUL SYLLABLE NO + {0xB179, 0xB193, prLVT}, // Lo [27] HANGUL SYLLABLE NOG..HANGUL SYLLABLE 
NOH + {0xB194, 0xB194, prLV}, // Lo HANGUL SYLLABLE NWA + {0xB195, 0xB1AF, prLVT}, // Lo [27] HANGUL SYLLABLE NWAG..HANGUL SYLLABLE NWAH + {0xB1B0, 0xB1B0, prLV}, // Lo HANGUL SYLLABLE NWAE + {0xB1B1, 0xB1CB, prLVT}, // Lo [27] HANGUL SYLLABLE NWAEG..HANGUL SYLLABLE NWAEH + {0xB1CC, 0xB1CC, prLV}, // Lo HANGUL SYLLABLE NOE + {0xB1CD, 0xB1E7, prLVT}, // Lo [27] HANGUL SYLLABLE NOEG..HANGUL SYLLABLE NOEH + {0xB1E8, 0xB1E8, prLV}, // Lo HANGUL SYLLABLE NYO + {0xB1E9, 0xB203, prLVT}, // Lo [27] HANGUL SYLLABLE NYOG..HANGUL SYLLABLE NYOH + {0xB204, 0xB204, prLV}, // Lo HANGUL SYLLABLE NU + {0xB205, 0xB21F, prLVT}, // Lo [27] HANGUL SYLLABLE NUG..HANGUL SYLLABLE NUH + {0xB220, 0xB220, prLV}, // Lo HANGUL SYLLABLE NWEO + {0xB221, 0xB23B, prLVT}, // Lo [27] HANGUL SYLLABLE NWEOG..HANGUL SYLLABLE NWEOH + {0xB23C, 0xB23C, prLV}, // Lo HANGUL SYLLABLE NWE + {0xB23D, 0xB257, prLVT}, // Lo [27] HANGUL SYLLABLE NWEG..HANGUL SYLLABLE NWEH + {0xB258, 0xB258, prLV}, // Lo HANGUL SYLLABLE NWI + {0xB259, 0xB273, prLVT}, // Lo [27] HANGUL SYLLABLE NWIG..HANGUL SYLLABLE NWIH + {0xB274, 0xB274, prLV}, // Lo HANGUL SYLLABLE NYU + {0xB275, 0xB28F, prLVT}, // Lo [27] HANGUL SYLLABLE NYUG..HANGUL SYLLABLE NYUH + {0xB290, 0xB290, prLV}, // Lo HANGUL SYLLABLE NEU + {0xB291, 0xB2AB, prLVT}, // Lo [27] HANGUL SYLLABLE NEUG..HANGUL SYLLABLE NEUH + {0xB2AC, 0xB2AC, prLV}, // Lo HANGUL SYLLABLE NYI + {0xB2AD, 0xB2C7, prLVT}, // Lo [27] HANGUL SYLLABLE NYIG..HANGUL SYLLABLE NYIH + {0xB2C8, 0xB2C8, prLV}, // Lo HANGUL SYLLABLE NI + {0xB2C9, 0xB2E3, prLVT}, // Lo [27] HANGUL SYLLABLE NIG..HANGUL SYLLABLE NIH + {0xB2E4, 0xB2E4, prLV}, // Lo HANGUL SYLLABLE DA + {0xB2E5, 0xB2FF, prLVT}, // Lo [27] HANGUL SYLLABLE DAG..HANGUL SYLLABLE DAH + {0xB300, 0xB300, prLV}, // Lo HANGUL SYLLABLE DAE + {0xB301, 0xB31B, prLVT}, // Lo [27] HANGUL SYLLABLE DAEG..HANGUL SYLLABLE DAEH + {0xB31C, 0xB31C, prLV}, // Lo HANGUL SYLLABLE DYA + {0xB31D, 0xB337, prLVT}, // Lo [27] HANGUL SYLLABLE DYAG..HANGUL SYLLABLE DYAH + {0xB338, 0xB338, prLV}, // Lo HANGUL SYLLABLE DYAE + {0xB339, 0xB353, prLVT}, // Lo [27] HANGUL SYLLABLE DYAEG..HANGUL SYLLABLE DYAEH + {0xB354, 0xB354, prLV}, // Lo HANGUL SYLLABLE DEO + {0xB355, 0xB36F, prLVT}, // Lo [27] HANGUL SYLLABLE DEOG..HANGUL SYLLABLE DEOH + {0xB370, 0xB370, prLV}, // Lo HANGUL SYLLABLE DE + {0xB371, 0xB38B, prLVT}, // Lo [27] HANGUL SYLLABLE DEG..HANGUL SYLLABLE DEH + {0xB38C, 0xB38C, prLV}, // Lo HANGUL SYLLABLE DYEO + {0xB38D, 0xB3A7, prLVT}, // Lo [27] HANGUL SYLLABLE DYEOG..HANGUL SYLLABLE DYEOH + {0xB3A8, 0xB3A8, prLV}, // Lo HANGUL SYLLABLE DYE + {0xB3A9, 0xB3C3, prLVT}, // Lo [27] HANGUL SYLLABLE DYEG..HANGUL SYLLABLE DYEH + {0xB3C4, 0xB3C4, prLV}, // Lo HANGUL SYLLABLE DO + {0xB3C5, 0xB3DF, prLVT}, // Lo [27] HANGUL SYLLABLE DOG..HANGUL SYLLABLE DOH + {0xB3E0, 0xB3E0, prLV}, // Lo HANGUL SYLLABLE DWA + {0xB3E1, 0xB3FB, prLVT}, // Lo [27] HANGUL SYLLABLE DWAG..HANGUL SYLLABLE DWAH + {0xB3FC, 0xB3FC, prLV}, // Lo HANGUL SYLLABLE DWAE + {0xB3FD, 0xB417, prLVT}, // Lo [27] HANGUL SYLLABLE DWAEG..HANGUL SYLLABLE DWAEH + {0xB418, 0xB418, prLV}, // Lo HANGUL SYLLABLE DOE + {0xB419, 0xB433, prLVT}, // Lo [27] HANGUL SYLLABLE DOEG..HANGUL SYLLABLE DOEH + {0xB434, 0xB434, prLV}, // Lo HANGUL SYLLABLE DYO + {0xB435, 0xB44F, prLVT}, // Lo [27] HANGUL SYLLABLE DYOG..HANGUL SYLLABLE DYOH + {0xB450, 0xB450, prLV}, // Lo HANGUL SYLLABLE DU + {0xB451, 0xB46B, prLVT}, // Lo [27] HANGUL SYLLABLE DUG..HANGUL SYLLABLE DUH + {0xB46C, 0xB46C, prLV}, // Lo HANGUL SYLLABLE DWEO + {0xB46D, 0xB487, prLVT}, // Lo [27] 
HANGUL SYLLABLE DWEOG..HANGUL SYLLABLE DWEOH + {0xB488, 0xB488, prLV}, // Lo HANGUL SYLLABLE DWE + {0xB489, 0xB4A3, prLVT}, // Lo [27] HANGUL SYLLABLE DWEG..HANGUL SYLLABLE DWEH + {0xB4A4, 0xB4A4, prLV}, // Lo HANGUL SYLLABLE DWI + {0xB4A5, 0xB4BF, prLVT}, // Lo [27] HANGUL SYLLABLE DWIG..HANGUL SYLLABLE DWIH + {0xB4C0, 0xB4C0, prLV}, // Lo HANGUL SYLLABLE DYU + {0xB4C1, 0xB4DB, prLVT}, // Lo [27] HANGUL SYLLABLE DYUG..HANGUL SYLLABLE DYUH + {0xB4DC, 0xB4DC, prLV}, // Lo HANGUL SYLLABLE DEU + {0xB4DD, 0xB4F7, prLVT}, // Lo [27] HANGUL SYLLABLE DEUG..HANGUL SYLLABLE DEUH + {0xB4F8, 0xB4F8, prLV}, // Lo HANGUL SYLLABLE DYI + {0xB4F9, 0xB513, prLVT}, // Lo [27] HANGUL SYLLABLE DYIG..HANGUL SYLLABLE DYIH + {0xB514, 0xB514, prLV}, // Lo HANGUL SYLLABLE DI + {0xB515, 0xB52F, prLVT}, // Lo [27] HANGUL SYLLABLE DIG..HANGUL SYLLABLE DIH + {0xB530, 0xB530, prLV}, // Lo HANGUL SYLLABLE DDA + {0xB531, 0xB54B, prLVT}, // Lo [27] HANGUL SYLLABLE DDAG..HANGUL SYLLABLE DDAH + {0xB54C, 0xB54C, prLV}, // Lo HANGUL SYLLABLE DDAE + {0xB54D, 0xB567, prLVT}, // Lo [27] HANGUL SYLLABLE DDAEG..HANGUL SYLLABLE DDAEH + {0xB568, 0xB568, prLV}, // Lo HANGUL SYLLABLE DDYA + {0xB569, 0xB583, prLVT}, // Lo [27] HANGUL SYLLABLE DDYAG..HANGUL SYLLABLE DDYAH + {0xB584, 0xB584, prLV}, // Lo HANGUL SYLLABLE DDYAE + {0xB585, 0xB59F, prLVT}, // Lo [27] HANGUL SYLLABLE DDYAEG..HANGUL SYLLABLE DDYAEH + {0xB5A0, 0xB5A0, prLV}, // Lo HANGUL SYLLABLE DDEO + {0xB5A1, 0xB5BB, prLVT}, // Lo [27] HANGUL SYLLABLE DDEOG..HANGUL SYLLABLE DDEOH + {0xB5BC, 0xB5BC, prLV}, // Lo HANGUL SYLLABLE DDE + {0xB5BD, 0xB5D7, prLVT}, // Lo [27] HANGUL SYLLABLE DDEG..HANGUL SYLLABLE DDEH + {0xB5D8, 0xB5D8, prLV}, // Lo HANGUL SYLLABLE DDYEO + {0xB5D9, 0xB5F3, prLVT}, // Lo [27] HANGUL SYLLABLE DDYEOG..HANGUL SYLLABLE DDYEOH + {0xB5F4, 0xB5F4, prLV}, // Lo HANGUL SYLLABLE DDYE + {0xB5F5, 0xB60F, prLVT}, // Lo [27] HANGUL SYLLABLE DDYEG..HANGUL SYLLABLE DDYEH + {0xB610, 0xB610, prLV}, // Lo HANGUL SYLLABLE DDO + {0xB611, 0xB62B, prLVT}, // Lo [27] HANGUL SYLLABLE DDOG..HANGUL SYLLABLE DDOH + {0xB62C, 0xB62C, prLV}, // Lo HANGUL SYLLABLE DDWA + {0xB62D, 0xB647, prLVT}, // Lo [27] HANGUL SYLLABLE DDWAG..HANGUL SYLLABLE DDWAH + {0xB648, 0xB648, prLV}, // Lo HANGUL SYLLABLE DDWAE + {0xB649, 0xB663, prLVT}, // Lo [27] HANGUL SYLLABLE DDWAEG..HANGUL SYLLABLE DDWAEH + {0xB664, 0xB664, prLV}, // Lo HANGUL SYLLABLE DDOE + {0xB665, 0xB67F, prLVT}, // Lo [27] HANGUL SYLLABLE DDOEG..HANGUL SYLLABLE DDOEH + {0xB680, 0xB680, prLV}, // Lo HANGUL SYLLABLE DDYO + {0xB681, 0xB69B, prLVT}, // Lo [27] HANGUL SYLLABLE DDYOG..HANGUL SYLLABLE DDYOH + {0xB69C, 0xB69C, prLV}, // Lo HANGUL SYLLABLE DDU + {0xB69D, 0xB6B7, prLVT}, // Lo [27] HANGUL SYLLABLE DDUG..HANGUL SYLLABLE DDUH + {0xB6B8, 0xB6B8, prLV}, // Lo HANGUL SYLLABLE DDWEO + {0xB6B9, 0xB6D3, prLVT}, // Lo [27] HANGUL SYLLABLE DDWEOG..HANGUL SYLLABLE DDWEOH + {0xB6D4, 0xB6D4, prLV}, // Lo HANGUL SYLLABLE DDWE + {0xB6D5, 0xB6EF, prLVT}, // Lo [27] HANGUL SYLLABLE DDWEG..HANGUL SYLLABLE DDWEH + {0xB6F0, 0xB6F0, prLV}, // Lo HANGUL SYLLABLE DDWI + {0xB6F1, 0xB70B, prLVT}, // Lo [27] HANGUL SYLLABLE DDWIG..HANGUL SYLLABLE DDWIH + {0xB70C, 0xB70C, prLV}, // Lo HANGUL SYLLABLE DDYU + {0xB70D, 0xB727, prLVT}, // Lo [27] HANGUL SYLLABLE DDYUG..HANGUL SYLLABLE DDYUH + {0xB728, 0xB728, prLV}, // Lo HANGUL SYLLABLE DDEU + {0xB729, 0xB743, prLVT}, // Lo [27] HANGUL SYLLABLE DDEUG..HANGUL SYLLABLE DDEUH + {0xB744, 0xB744, prLV}, // Lo HANGUL SYLLABLE DDYI + {0xB745, 0xB75F, prLVT}, // Lo [27] HANGUL SYLLABLE DDYIG..HANGUL SYLLABLE 
DDYIH + {0xB760, 0xB760, prLV}, // Lo HANGUL SYLLABLE DDI + {0xB761, 0xB77B, prLVT}, // Lo [27] HANGUL SYLLABLE DDIG..HANGUL SYLLABLE DDIH + {0xB77C, 0xB77C, prLV}, // Lo HANGUL SYLLABLE RA + {0xB77D, 0xB797, prLVT}, // Lo [27] HANGUL SYLLABLE RAG..HANGUL SYLLABLE RAH + {0xB798, 0xB798, prLV}, // Lo HANGUL SYLLABLE RAE + {0xB799, 0xB7B3, prLVT}, // Lo [27] HANGUL SYLLABLE RAEG..HANGUL SYLLABLE RAEH + {0xB7B4, 0xB7B4, prLV}, // Lo HANGUL SYLLABLE RYA + {0xB7B5, 0xB7CF, prLVT}, // Lo [27] HANGUL SYLLABLE RYAG..HANGUL SYLLABLE RYAH + {0xB7D0, 0xB7D0, prLV}, // Lo HANGUL SYLLABLE RYAE + {0xB7D1, 0xB7EB, prLVT}, // Lo [27] HANGUL SYLLABLE RYAEG..HANGUL SYLLABLE RYAEH + {0xB7EC, 0xB7EC, prLV}, // Lo HANGUL SYLLABLE REO + {0xB7ED, 0xB807, prLVT}, // Lo [27] HANGUL SYLLABLE REOG..HANGUL SYLLABLE REOH + {0xB808, 0xB808, prLV}, // Lo HANGUL SYLLABLE RE + {0xB809, 0xB823, prLVT}, // Lo [27] HANGUL SYLLABLE REG..HANGUL SYLLABLE REH + {0xB824, 0xB824, prLV}, // Lo HANGUL SYLLABLE RYEO + {0xB825, 0xB83F, prLVT}, // Lo [27] HANGUL SYLLABLE RYEOG..HANGUL SYLLABLE RYEOH + {0xB840, 0xB840, prLV}, // Lo HANGUL SYLLABLE RYE + {0xB841, 0xB85B, prLVT}, // Lo [27] HANGUL SYLLABLE RYEG..HANGUL SYLLABLE RYEH + {0xB85C, 0xB85C, prLV}, // Lo HANGUL SYLLABLE RO + {0xB85D, 0xB877, prLVT}, // Lo [27] HANGUL SYLLABLE ROG..HANGUL SYLLABLE ROH + {0xB878, 0xB878, prLV}, // Lo HANGUL SYLLABLE RWA + {0xB879, 0xB893, prLVT}, // Lo [27] HANGUL SYLLABLE RWAG..HANGUL SYLLABLE RWAH + {0xB894, 0xB894, prLV}, // Lo HANGUL SYLLABLE RWAE + {0xB895, 0xB8AF, prLVT}, // Lo [27] HANGUL SYLLABLE RWAEG..HANGUL SYLLABLE RWAEH + {0xB8B0, 0xB8B0, prLV}, // Lo HANGUL SYLLABLE ROE + {0xB8B1, 0xB8CB, prLVT}, // Lo [27] HANGUL SYLLABLE ROEG..HANGUL SYLLABLE ROEH + {0xB8CC, 0xB8CC, prLV}, // Lo HANGUL SYLLABLE RYO + {0xB8CD, 0xB8E7, prLVT}, // Lo [27] HANGUL SYLLABLE RYOG..HANGUL SYLLABLE RYOH + {0xB8E8, 0xB8E8, prLV}, // Lo HANGUL SYLLABLE RU + {0xB8E9, 0xB903, prLVT}, // Lo [27] HANGUL SYLLABLE RUG..HANGUL SYLLABLE RUH + {0xB904, 0xB904, prLV}, // Lo HANGUL SYLLABLE RWEO + {0xB905, 0xB91F, prLVT}, // Lo [27] HANGUL SYLLABLE RWEOG..HANGUL SYLLABLE RWEOH + {0xB920, 0xB920, prLV}, // Lo HANGUL SYLLABLE RWE + {0xB921, 0xB93B, prLVT}, // Lo [27] HANGUL SYLLABLE RWEG..HANGUL SYLLABLE RWEH + {0xB93C, 0xB93C, prLV}, // Lo HANGUL SYLLABLE RWI + {0xB93D, 0xB957, prLVT}, // Lo [27] HANGUL SYLLABLE RWIG..HANGUL SYLLABLE RWIH + {0xB958, 0xB958, prLV}, // Lo HANGUL SYLLABLE RYU + {0xB959, 0xB973, prLVT}, // Lo [27] HANGUL SYLLABLE RYUG..HANGUL SYLLABLE RYUH + {0xB974, 0xB974, prLV}, // Lo HANGUL SYLLABLE REU + {0xB975, 0xB98F, prLVT}, // Lo [27] HANGUL SYLLABLE REUG..HANGUL SYLLABLE REUH + {0xB990, 0xB990, prLV}, // Lo HANGUL SYLLABLE RYI + {0xB991, 0xB9AB, prLVT}, // Lo [27] HANGUL SYLLABLE RYIG..HANGUL SYLLABLE RYIH + {0xB9AC, 0xB9AC, prLV}, // Lo HANGUL SYLLABLE RI + {0xB9AD, 0xB9C7, prLVT}, // Lo [27] HANGUL SYLLABLE RIG..HANGUL SYLLABLE RIH + {0xB9C8, 0xB9C8, prLV}, // Lo HANGUL SYLLABLE MA + {0xB9C9, 0xB9E3, prLVT}, // Lo [27] HANGUL SYLLABLE MAG..HANGUL SYLLABLE MAH + {0xB9E4, 0xB9E4, prLV}, // Lo HANGUL SYLLABLE MAE + {0xB9E5, 0xB9FF, prLVT}, // Lo [27] HANGUL SYLLABLE MAEG..HANGUL SYLLABLE MAEH + {0xBA00, 0xBA00, prLV}, // Lo HANGUL SYLLABLE MYA + {0xBA01, 0xBA1B, prLVT}, // Lo [27] HANGUL SYLLABLE MYAG..HANGUL SYLLABLE MYAH + {0xBA1C, 0xBA1C, prLV}, // Lo HANGUL SYLLABLE MYAE + {0xBA1D, 0xBA37, prLVT}, // Lo [27] HANGUL SYLLABLE MYAEG..HANGUL SYLLABLE MYAEH + {0xBA38, 0xBA38, prLV}, // Lo HANGUL SYLLABLE MEO + {0xBA39, 0xBA53, prLVT}, // Lo [27] 
HANGUL SYLLABLE MEOG..HANGUL SYLLABLE MEOH + {0xBA54, 0xBA54, prLV}, // Lo HANGUL SYLLABLE ME + {0xBA55, 0xBA6F, prLVT}, // Lo [27] HANGUL SYLLABLE MEG..HANGUL SYLLABLE MEH + {0xBA70, 0xBA70, prLV}, // Lo HANGUL SYLLABLE MYEO + {0xBA71, 0xBA8B, prLVT}, // Lo [27] HANGUL SYLLABLE MYEOG..HANGUL SYLLABLE MYEOH + {0xBA8C, 0xBA8C, prLV}, // Lo HANGUL SYLLABLE MYE + {0xBA8D, 0xBAA7, prLVT}, // Lo [27] HANGUL SYLLABLE MYEG..HANGUL SYLLABLE MYEH + {0xBAA8, 0xBAA8, prLV}, // Lo HANGUL SYLLABLE MO + {0xBAA9, 0xBAC3, prLVT}, // Lo [27] HANGUL SYLLABLE MOG..HANGUL SYLLABLE MOH + {0xBAC4, 0xBAC4, prLV}, // Lo HANGUL SYLLABLE MWA + {0xBAC5, 0xBADF, prLVT}, // Lo [27] HANGUL SYLLABLE MWAG..HANGUL SYLLABLE MWAH + {0xBAE0, 0xBAE0, prLV}, // Lo HANGUL SYLLABLE MWAE + {0xBAE1, 0xBAFB, prLVT}, // Lo [27] HANGUL SYLLABLE MWAEG..HANGUL SYLLABLE MWAEH + {0xBAFC, 0xBAFC, prLV}, // Lo HANGUL SYLLABLE MOE + {0xBAFD, 0xBB17, prLVT}, // Lo [27] HANGUL SYLLABLE MOEG..HANGUL SYLLABLE MOEH + {0xBB18, 0xBB18, prLV}, // Lo HANGUL SYLLABLE MYO + {0xBB19, 0xBB33, prLVT}, // Lo [27] HANGUL SYLLABLE MYOG..HANGUL SYLLABLE MYOH + {0xBB34, 0xBB34, prLV}, // Lo HANGUL SYLLABLE MU + {0xBB35, 0xBB4F, prLVT}, // Lo [27] HANGUL SYLLABLE MUG..HANGUL SYLLABLE MUH + {0xBB50, 0xBB50, prLV}, // Lo HANGUL SYLLABLE MWEO + {0xBB51, 0xBB6B, prLVT}, // Lo [27] HANGUL SYLLABLE MWEOG..HANGUL SYLLABLE MWEOH + {0xBB6C, 0xBB6C, prLV}, // Lo HANGUL SYLLABLE MWE + {0xBB6D, 0xBB87, prLVT}, // Lo [27] HANGUL SYLLABLE MWEG..HANGUL SYLLABLE MWEH + {0xBB88, 0xBB88, prLV}, // Lo HANGUL SYLLABLE MWI + {0xBB89, 0xBBA3, prLVT}, // Lo [27] HANGUL SYLLABLE MWIG..HANGUL SYLLABLE MWIH + {0xBBA4, 0xBBA4, prLV}, // Lo HANGUL SYLLABLE MYU + {0xBBA5, 0xBBBF, prLVT}, // Lo [27] HANGUL SYLLABLE MYUG..HANGUL SYLLABLE MYUH + {0xBBC0, 0xBBC0, prLV}, // Lo HANGUL SYLLABLE MEU + {0xBBC1, 0xBBDB, prLVT}, // Lo [27] HANGUL SYLLABLE MEUG..HANGUL SYLLABLE MEUH + {0xBBDC, 0xBBDC, prLV}, // Lo HANGUL SYLLABLE MYI + {0xBBDD, 0xBBF7, prLVT}, // Lo [27] HANGUL SYLLABLE MYIG..HANGUL SYLLABLE MYIH + {0xBBF8, 0xBBF8, prLV}, // Lo HANGUL SYLLABLE MI + {0xBBF9, 0xBC13, prLVT}, // Lo [27] HANGUL SYLLABLE MIG..HANGUL SYLLABLE MIH + {0xBC14, 0xBC14, prLV}, // Lo HANGUL SYLLABLE BA + {0xBC15, 0xBC2F, prLVT}, // Lo [27] HANGUL SYLLABLE BAG..HANGUL SYLLABLE BAH + {0xBC30, 0xBC30, prLV}, // Lo HANGUL SYLLABLE BAE + {0xBC31, 0xBC4B, prLVT}, // Lo [27] HANGUL SYLLABLE BAEG..HANGUL SYLLABLE BAEH + {0xBC4C, 0xBC4C, prLV}, // Lo HANGUL SYLLABLE BYA + {0xBC4D, 0xBC67, prLVT}, // Lo [27] HANGUL SYLLABLE BYAG..HANGUL SYLLABLE BYAH + {0xBC68, 0xBC68, prLV}, // Lo HANGUL SYLLABLE BYAE + {0xBC69, 0xBC83, prLVT}, // Lo [27] HANGUL SYLLABLE BYAEG..HANGUL SYLLABLE BYAEH + {0xBC84, 0xBC84, prLV}, // Lo HANGUL SYLLABLE BEO + {0xBC85, 0xBC9F, prLVT}, // Lo [27] HANGUL SYLLABLE BEOG..HANGUL SYLLABLE BEOH + {0xBCA0, 0xBCA0, prLV}, // Lo HANGUL SYLLABLE BE + {0xBCA1, 0xBCBB, prLVT}, // Lo [27] HANGUL SYLLABLE BEG..HANGUL SYLLABLE BEH + {0xBCBC, 0xBCBC, prLV}, // Lo HANGUL SYLLABLE BYEO + {0xBCBD, 0xBCD7, prLVT}, // Lo [27] HANGUL SYLLABLE BYEOG..HANGUL SYLLABLE BYEOH + {0xBCD8, 0xBCD8, prLV}, // Lo HANGUL SYLLABLE BYE + {0xBCD9, 0xBCF3, prLVT}, // Lo [27] HANGUL SYLLABLE BYEG..HANGUL SYLLABLE BYEH + {0xBCF4, 0xBCF4, prLV}, // Lo HANGUL SYLLABLE BO + {0xBCF5, 0xBD0F, prLVT}, // Lo [27] HANGUL SYLLABLE BOG..HANGUL SYLLABLE BOH + {0xBD10, 0xBD10, prLV}, // Lo HANGUL SYLLABLE BWA + {0xBD11, 0xBD2B, prLVT}, // Lo [27] HANGUL SYLLABLE BWAG..HANGUL SYLLABLE BWAH + {0xBD2C, 0xBD2C, prLV}, // Lo HANGUL SYLLABLE BWAE + 
{0xBD2D, 0xBD47, prLVT}, // Lo [27] HANGUL SYLLABLE BWAEG..HANGUL SYLLABLE BWAEH + {0xBD48, 0xBD48, prLV}, // Lo HANGUL SYLLABLE BOE + {0xBD49, 0xBD63, prLVT}, // Lo [27] HANGUL SYLLABLE BOEG..HANGUL SYLLABLE BOEH + {0xBD64, 0xBD64, prLV}, // Lo HANGUL SYLLABLE BYO + {0xBD65, 0xBD7F, prLVT}, // Lo [27] HANGUL SYLLABLE BYOG..HANGUL SYLLABLE BYOH + {0xBD80, 0xBD80, prLV}, // Lo HANGUL SYLLABLE BU + {0xBD81, 0xBD9B, prLVT}, // Lo [27] HANGUL SYLLABLE BUG..HANGUL SYLLABLE BUH + {0xBD9C, 0xBD9C, prLV}, // Lo HANGUL SYLLABLE BWEO + {0xBD9D, 0xBDB7, prLVT}, // Lo [27] HANGUL SYLLABLE BWEOG..HANGUL SYLLABLE BWEOH + {0xBDB8, 0xBDB8, prLV}, // Lo HANGUL SYLLABLE BWE + {0xBDB9, 0xBDD3, prLVT}, // Lo [27] HANGUL SYLLABLE BWEG..HANGUL SYLLABLE BWEH + {0xBDD4, 0xBDD4, prLV}, // Lo HANGUL SYLLABLE BWI + {0xBDD5, 0xBDEF, prLVT}, // Lo [27] HANGUL SYLLABLE BWIG..HANGUL SYLLABLE BWIH + {0xBDF0, 0xBDF0, prLV}, // Lo HANGUL SYLLABLE BYU + {0xBDF1, 0xBE0B, prLVT}, // Lo [27] HANGUL SYLLABLE BYUG..HANGUL SYLLABLE BYUH + {0xBE0C, 0xBE0C, prLV}, // Lo HANGUL SYLLABLE BEU + {0xBE0D, 0xBE27, prLVT}, // Lo [27] HANGUL SYLLABLE BEUG..HANGUL SYLLABLE BEUH + {0xBE28, 0xBE28, prLV}, // Lo HANGUL SYLLABLE BYI + {0xBE29, 0xBE43, prLVT}, // Lo [27] HANGUL SYLLABLE BYIG..HANGUL SYLLABLE BYIH + {0xBE44, 0xBE44, prLV}, // Lo HANGUL SYLLABLE BI + {0xBE45, 0xBE5F, prLVT}, // Lo [27] HANGUL SYLLABLE BIG..HANGUL SYLLABLE BIH + {0xBE60, 0xBE60, prLV}, // Lo HANGUL SYLLABLE BBA + {0xBE61, 0xBE7B, prLVT}, // Lo [27] HANGUL SYLLABLE BBAG..HANGUL SYLLABLE BBAH + {0xBE7C, 0xBE7C, prLV}, // Lo HANGUL SYLLABLE BBAE + {0xBE7D, 0xBE97, prLVT}, // Lo [27] HANGUL SYLLABLE BBAEG..HANGUL SYLLABLE BBAEH + {0xBE98, 0xBE98, prLV}, // Lo HANGUL SYLLABLE BBYA + {0xBE99, 0xBEB3, prLVT}, // Lo [27] HANGUL SYLLABLE BBYAG..HANGUL SYLLABLE BBYAH + {0xBEB4, 0xBEB4, prLV}, // Lo HANGUL SYLLABLE BBYAE + {0xBEB5, 0xBECF, prLVT}, // Lo [27] HANGUL SYLLABLE BBYAEG..HANGUL SYLLABLE BBYAEH + {0xBED0, 0xBED0, prLV}, // Lo HANGUL SYLLABLE BBEO + {0xBED1, 0xBEEB, prLVT}, // Lo [27] HANGUL SYLLABLE BBEOG..HANGUL SYLLABLE BBEOH + {0xBEEC, 0xBEEC, prLV}, // Lo HANGUL SYLLABLE BBE + {0xBEED, 0xBF07, prLVT}, // Lo [27] HANGUL SYLLABLE BBEG..HANGUL SYLLABLE BBEH + {0xBF08, 0xBF08, prLV}, // Lo HANGUL SYLLABLE BBYEO + {0xBF09, 0xBF23, prLVT}, // Lo [27] HANGUL SYLLABLE BBYEOG..HANGUL SYLLABLE BBYEOH + {0xBF24, 0xBF24, prLV}, // Lo HANGUL SYLLABLE BBYE + {0xBF25, 0xBF3F, prLVT}, // Lo [27] HANGUL SYLLABLE BBYEG..HANGUL SYLLABLE BBYEH + {0xBF40, 0xBF40, prLV}, // Lo HANGUL SYLLABLE BBO + {0xBF41, 0xBF5B, prLVT}, // Lo [27] HANGUL SYLLABLE BBOG..HANGUL SYLLABLE BBOH + {0xBF5C, 0xBF5C, prLV}, // Lo HANGUL SYLLABLE BBWA + {0xBF5D, 0xBF77, prLVT}, // Lo [27] HANGUL SYLLABLE BBWAG..HANGUL SYLLABLE BBWAH + {0xBF78, 0xBF78, prLV}, // Lo HANGUL SYLLABLE BBWAE + {0xBF79, 0xBF93, prLVT}, // Lo [27] HANGUL SYLLABLE BBWAEG..HANGUL SYLLABLE BBWAEH + {0xBF94, 0xBF94, prLV}, // Lo HANGUL SYLLABLE BBOE + {0xBF95, 0xBFAF, prLVT}, // Lo [27] HANGUL SYLLABLE BBOEG..HANGUL SYLLABLE BBOEH + {0xBFB0, 0xBFB0, prLV}, // Lo HANGUL SYLLABLE BBYO + {0xBFB1, 0xBFCB, prLVT}, // Lo [27] HANGUL SYLLABLE BBYOG..HANGUL SYLLABLE BBYOH + {0xBFCC, 0xBFCC, prLV}, // Lo HANGUL SYLLABLE BBU + {0xBFCD, 0xBFE7, prLVT}, // Lo [27] HANGUL SYLLABLE BBUG..HANGUL SYLLABLE BBUH + {0xBFE8, 0xBFE8, prLV}, // Lo HANGUL SYLLABLE BBWEO + {0xBFE9, 0xC003, prLVT}, // Lo [27] HANGUL SYLLABLE BBWEOG..HANGUL SYLLABLE BBWEOH + {0xC004, 0xC004, prLV}, // Lo HANGUL SYLLABLE BBWE + {0xC005, 0xC01F, prLVT}, // Lo [27] HANGUL SYLLABLE 
BBWEG..HANGUL SYLLABLE BBWEH + {0xC020, 0xC020, prLV}, // Lo HANGUL SYLLABLE BBWI + {0xC021, 0xC03B, prLVT}, // Lo [27] HANGUL SYLLABLE BBWIG..HANGUL SYLLABLE BBWIH + {0xC03C, 0xC03C, prLV}, // Lo HANGUL SYLLABLE BBYU + {0xC03D, 0xC057, prLVT}, // Lo [27] HANGUL SYLLABLE BBYUG..HANGUL SYLLABLE BBYUH + {0xC058, 0xC058, prLV}, // Lo HANGUL SYLLABLE BBEU + {0xC059, 0xC073, prLVT}, // Lo [27] HANGUL SYLLABLE BBEUG..HANGUL SYLLABLE BBEUH + {0xC074, 0xC074, prLV}, // Lo HANGUL SYLLABLE BBYI + {0xC075, 0xC08F, prLVT}, // Lo [27] HANGUL SYLLABLE BBYIG..HANGUL SYLLABLE BBYIH + {0xC090, 0xC090, prLV}, // Lo HANGUL SYLLABLE BBI + {0xC091, 0xC0AB, prLVT}, // Lo [27] HANGUL SYLLABLE BBIG..HANGUL SYLLABLE BBIH + {0xC0AC, 0xC0AC, prLV}, // Lo HANGUL SYLLABLE SA + {0xC0AD, 0xC0C7, prLVT}, // Lo [27] HANGUL SYLLABLE SAG..HANGUL SYLLABLE SAH + {0xC0C8, 0xC0C8, prLV}, // Lo HANGUL SYLLABLE SAE + {0xC0C9, 0xC0E3, prLVT}, // Lo [27] HANGUL SYLLABLE SAEG..HANGUL SYLLABLE SAEH + {0xC0E4, 0xC0E4, prLV}, // Lo HANGUL SYLLABLE SYA + {0xC0E5, 0xC0FF, prLVT}, // Lo [27] HANGUL SYLLABLE SYAG..HANGUL SYLLABLE SYAH + {0xC100, 0xC100, prLV}, // Lo HANGUL SYLLABLE SYAE + {0xC101, 0xC11B, prLVT}, // Lo [27] HANGUL SYLLABLE SYAEG..HANGUL SYLLABLE SYAEH + {0xC11C, 0xC11C, prLV}, // Lo HANGUL SYLLABLE SEO + {0xC11D, 0xC137, prLVT}, // Lo [27] HANGUL SYLLABLE SEOG..HANGUL SYLLABLE SEOH + {0xC138, 0xC138, prLV}, // Lo HANGUL SYLLABLE SE + {0xC139, 0xC153, prLVT}, // Lo [27] HANGUL SYLLABLE SEG..HANGUL SYLLABLE SEH + {0xC154, 0xC154, prLV}, // Lo HANGUL SYLLABLE SYEO + {0xC155, 0xC16F, prLVT}, // Lo [27] HANGUL SYLLABLE SYEOG..HANGUL SYLLABLE SYEOH + {0xC170, 0xC170, prLV}, // Lo HANGUL SYLLABLE SYE + {0xC171, 0xC18B, prLVT}, // Lo [27] HANGUL SYLLABLE SYEG..HANGUL SYLLABLE SYEH + {0xC18C, 0xC18C, prLV}, // Lo HANGUL SYLLABLE SO + {0xC18D, 0xC1A7, prLVT}, // Lo [27] HANGUL SYLLABLE SOG..HANGUL SYLLABLE SOH + {0xC1A8, 0xC1A8, prLV}, // Lo HANGUL SYLLABLE SWA + {0xC1A9, 0xC1C3, prLVT}, // Lo [27] HANGUL SYLLABLE SWAG..HANGUL SYLLABLE SWAH + {0xC1C4, 0xC1C4, prLV}, // Lo HANGUL SYLLABLE SWAE + {0xC1C5, 0xC1DF, prLVT}, // Lo [27] HANGUL SYLLABLE SWAEG..HANGUL SYLLABLE SWAEH + {0xC1E0, 0xC1E0, prLV}, // Lo HANGUL SYLLABLE SOE + {0xC1E1, 0xC1FB, prLVT}, // Lo [27] HANGUL SYLLABLE SOEG..HANGUL SYLLABLE SOEH + {0xC1FC, 0xC1FC, prLV}, // Lo HANGUL SYLLABLE SYO + {0xC1FD, 0xC217, prLVT}, // Lo [27] HANGUL SYLLABLE SYOG..HANGUL SYLLABLE SYOH + {0xC218, 0xC218, prLV}, // Lo HANGUL SYLLABLE SU + {0xC219, 0xC233, prLVT}, // Lo [27] HANGUL SYLLABLE SUG..HANGUL SYLLABLE SUH + {0xC234, 0xC234, prLV}, // Lo HANGUL SYLLABLE SWEO + {0xC235, 0xC24F, prLVT}, // Lo [27] HANGUL SYLLABLE SWEOG..HANGUL SYLLABLE SWEOH + {0xC250, 0xC250, prLV}, // Lo HANGUL SYLLABLE SWE + {0xC251, 0xC26B, prLVT}, // Lo [27] HANGUL SYLLABLE SWEG..HANGUL SYLLABLE SWEH + {0xC26C, 0xC26C, prLV}, // Lo HANGUL SYLLABLE SWI + {0xC26D, 0xC287, prLVT}, // Lo [27] HANGUL SYLLABLE SWIG..HANGUL SYLLABLE SWIH + {0xC288, 0xC288, prLV}, // Lo HANGUL SYLLABLE SYU + {0xC289, 0xC2A3, prLVT}, // Lo [27] HANGUL SYLLABLE SYUG..HANGUL SYLLABLE SYUH + {0xC2A4, 0xC2A4, prLV}, // Lo HANGUL SYLLABLE SEU + {0xC2A5, 0xC2BF, prLVT}, // Lo [27] HANGUL SYLLABLE SEUG..HANGUL SYLLABLE SEUH + {0xC2C0, 0xC2C0, prLV}, // Lo HANGUL SYLLABLE SYI + {0xC2C1, 0xC2DB, prLVT}, // Lo [27] HANGUL SYLLABLE SYIG..HANGUL SYLLABLE SYIH + {0xC2DC, 0xC2DC, prLV}, // Lo HANGUL SYLLABLE SI + {0xC2DD, 0xC2F7, prLVT}, // Lo [27] HANGUL SYLLABLE SIG..HANGUL SYLLABLE SIH + {0xC2F8, 0xC2F8, prLV}, // Lo HANGUL SYLLABLE SSA + 
{0xC2F9, 0xC313, prLVT}, // Lo [27] HANGUL SYLLABLE SSAG..HANGUL SYLLABLE SSAH + {0xC314, 0xC314, prLV}, // Lo HANGUL SYLLABLE SSAE + {0xC315, 0xC32F, prLVT}, // Lo [27] HANGUL SYLLABLE SSAEG..HANGUL SYLLABLE SSAEH + {0xC330, 0xC330, prLV}, // Lo HANGUL SYLLABLE SSYA + {0xC331, 0xC34B, prLVT}, // Lo [27] HANGUL SYLLABLE SSYAG..HANGUL SYLLABLE SSYAH + {0xC34C, 0xC34C, prLV}, // Lo HANGUL SYLLABLE SSYAE + {0xC34D, 0xC367, prLVT}, // Lo [27] HANGUL SYLLABLE SSYAEG..HANGUL SYLLABLE SSYAEH + {0xC368, 0xC368, prLV}, // Lo HANGUL SYLLABLE SSEO + {0xC369, 0xC383, prLVT}, // Lo [27] HANGUL SYLLABLE SSEOG..HANGUL SYLLABLE SSEOH + {0xC384, 0xC384, prLV}, // Lo HANGUL SYLLABLE SSE + {0xC385, 0xC39F, prLVT}, // Lo [27] HANGUL SYLLABLE SSEG..HANGUL SYLLABLE SSEH + {0xC3A0, 0xC3A0, prLV}, // Lo HANGUL SYLLABLE SSYEO + {0xC3A1, 0xC3BB, prLVT}, // Lo [27] HANGUL SYLLABLE SSYEOG..HANGUL SYLLABLE SSYEOH + {0xC3BC, 0xC3BC, prLV}, // Lo HANGUL SYLLABLE SSYE + {0xC3BD, 0xC3D7, prLVT}, // Lo [27] HANGUL SYLLABLE SSYEG..HANGUL SYLLABLE SSYEH + {0xC3D8, 0xC3D8, prLV}, // Lo HANGUL SYLLABLE SSO + {0xC3D9, 0xC3F3, prLVT}, // Lo [27] HANGUL SYLLABLE SSOG..HANGUL SYLLABLE SSOH + {0xC3F4, 0xC3F4, prLV}, // Lo HANGUL SYLLABLE SSWA + {0xC3F5, 0xC40F, prLVT}, // Lo [27] HANGUL SYLLABLE SSWAG..HANGUL SYLLABLE SSWAH + {0xC410, 0xC410, prLV}, // Lo HANGUL SYLLABLE SSWAE + {0xC411, 0xC42B, prLVT}, // Lo [27] HANGUL SYLLABLE SSWAEG..HANGUL SYLLABLE SSWAEH + {0xC42C, 0xC42C, prLV}, // Lo HANGUL SYLLABLE SSOE + {0xC42D, 0xC447, prLVT}, // Lo [27] HANGUL SYLLABLE SSOEG..HANGUL SYLLABLE SSOEH + {0xC448, 0xC448, prLV}, // Lo HANGUL SYLLABLE SSYO + {0xC449, 0xC463, prLVT}, // Lo [27] HANGUL SYLLABLE SSYOG..HANGUL SYLLABLE SSYOH + {0xC464, 0xC464, prLV}, // Lo HANGUL SYLLABLE SSU + {0xC465, 0xC47F, prLVT}, // Lo [27] HANGUL SYLLABLE SSUG..HANGUL SYLLABLE SSUH + {0xC480, 0xC480, prLV}, // Lo HANGUL SYLLABLE SSWEO + {0xC481, 0xC49B, prLVT}, // Lo [27] HANGUL SYLLABLE SSWEOG..HANGUL SYLLABLE SSWEOH + {0xC49C, 0xC49C, prLV}, // Lo HANGUL SYLLABLE SSWE + {0xC49D, 0xC4B7, prLVT}, // Lo [27] HANGUL SYLLABLE SSWEG..HANGUL SYLLABLE SSWEH + {0xC4B8, 0xC4B8, prLV}, // Lo HANGUL SYLLABLE SSWI + {0xC4B9, 0xC4D3, prLVT}, // Lo [27] HANGUL SYLLABLE SSWIG..HANGUL SYLLABLE SSWIH + {0xC4D4, 0xC4D4, prLV}, // Lo HANGUL SYLLABLE SSYU + {0xC4D5, 0xC4EF, prLVT}, // Lo [27] HANGUL SYLLABLE SSYUG..HANGUL SYLLABLE SSYUH + {0xC4F0, 0xC4F0, prLV}, // Lo HANGUL SYLLABLE SSEU + {0xC4F1, 0xC50B, prLVT}, // Lo [27] HANGUL SYLLABLE SSEUG..HANGUL SYLLABLE SSEUH + {0xC50C, 0xC50C, prLV}, // Lo HANGUL SYLLABLE SSYI + {0xC50D, 0xC527, prLVT}, // Lo [27] HANGUL SYLLABLE SSYIG..HANGUL SYLLABLE SSYIH + {0xC528, 0xC528, prLV}, // Lo HANGUL SYLLABLE SSI + {0xC529, 0xC543, prLVT}, // Lo [27] HANGUL SYLLABLE SSIG..HANGUL SYLLABLE SSIH + {0xC544, 0xC544, prLV}, // Lo HANGUL SYLLABLE A + {0xC545, 0xC55F, prLVT}, // Lo [27] HANGUL SYLLABLE AG..HANGUL SYLLABLE AH + {0xC560, 0xC560, prLV}, // Lo HANGUL SYLLABLE AE + {0xC561, 0xC57B, prLVT}, // Lo [27] HANGUL SYLLABLE AEG..HANGUL SYLLABLE AEH + {0xC57C, 0xC57C, prLV}, // Lo HANGUL SYLLABLE YA + {0xC57D, 0xC597, prLVT}, // Lo [27] HANGUL SYLLABLE YAG..HANGUL SYLLABLE YAH + {0xC598, 0xC598, prLV}, // Lo HANGUL SYLLABLE YAE + {0xC599, 0xC5B3, prLVT}, // Lo [27] HANGUL SYLLABLE YAEG..HANGUL SYLLABLE YAEH + {0xC5B4, 0xC5B4, prLV}, // Lo HANGUL SYLLABLE EO + {0xC5B5, 0xC5CF, prLVT}, // Lo [27] HANGUL SYLLABLE EOG..HANGUL SYLLABLE EOH + {0xC5D0, 0xC5D0, prLV}, // Lo HANGUL SYLLABLE E + {0xC5D1, 0xC5EB, prLVT}, // Lo [27] HANGUL SYLLABLE 
EG..HANGUL SYLLABLE EH + {0xC5EC, 0xC5EC, prLV}, // Lo HANGUL SYLLABLE YEO + {0xC5ED, 0xC607, prLVT}, // Lo [27] HANGUL SYLLABLE YEOG..HANGUL SYLLABLE YEOH + {0xC608, 0xC608, prLV}, // Lo HANGUL SYLLABLE YE + {0xC609, 0xC623, prLVT}, // Lo [27] HANGUL SYLLABLE YEG..HANGUL SYLLABLE YEH + {0xC624, 0xC624, prLV}, // Lo HANGUL SYLLABLE O + {0xC625, 0xC63F, prLVT}, // Lo [27] HANGUL SYLLABLE OG..HANGUL SYLLABLE OH + {0xC640, 0xC640, prLV}, // Lo HANGUL SYLLABLE WA + {0xC641, 0xC65B, prLVT}, // Lo [27] HANGUL SYLLABLE WAG..HANGUL SYLLABLE WAH + {0xC65C, 0xC65C, prLV}, // Lo HANGUL SYLLABLE WAE + {0xC65D, 0xC677, prLVT}, // Lo [27] HANGUL SYLLABLE WAEG..HANGUL SYLLABLE WAEH + {0xC678, 0xC678, prLV}, // Lo HANGUL SYLLABLE OE + {0xC679, 0xC693, prLVT}, // Lo [27] HANGUL SYLLABLE OEG..HANGUL SYLLABLE OEH + {0xC694, 0xC694, prLV}, // Lo HANGUL SYLLABLE YO + {0xC695, 0xC6AF, prLVT}, // Lo [27] HANGUL SYLLABLE YOG..HANGUL SYLLABLE YOH + {0xC6B0, 0xC6B0, prLV}, // Lo HANGUL SYLLABLE U + {0xC6B1, 0xC6CB, prLVT}, // Lo [27] HANGUL SYLLABLE UG..HANGUL SYLLABLE UH + {0xC6CC, 0xC6CC, prLV}, // Lo HANGUL SYLLABLE WEO + {0xC6CD, 0xC6E7, prLVT}, // Lo [27] HANGUL SYLLABLE WEOG..HANGUL SYLLABLE WEOH + {0xC6E8, 0xC6E8, prLV}, // Lo HANGUL SYLLABLE WE + {0xC6E9, 0xC703, prLVT}, // Lo [27] HANGUL SYLLABLE WEG..HANGUL SYLLABLE WEH + {0xC704, 0xC704, prLV}, // Lo HANGUL SYLLABLE WI + {0xC705, 0xC71F, prLVT}, // Lo [27] HANGUL SYLLABLE WIG..HANGUL SYLLABLE WIH + {0xC720, 0xC720, prLV}, // Lo HANGUL SYLLABLE YU + {0xC721, 0xC73B, prLVT}, // Lo [27] HANGUL SYLLABLE YUG..HANGUL SYLLABLE YUH + {0xC73C, 0xC73C, prLV}, // Lo HANGUL SYLLABLE EU + {0xC73D, 0xC757, prLVT}, // Lo [27] HANGUL SYLLABLE EUG..HANGUL SYLLABLE EUH + {0xC758, 0xC758, prLV}, // Lo HANGUL SYLLABLE YI + {0xC759, 0xC773, prLVT}, // Lo [27] HANGUL SYLLABLE YIG..HANGUL SYLLABLE YIH + {0xC774, 0xC774, prLV}, // Lo HANGUL SYLLABLE I + {0xC775, 0xC78F, prLVT}, // Lo [27] HANGUL SYLLABLE IG..HANGUL SYLLABLE IH + {0xC790, 0xC790, prLV}, // Lo HANGUL SYLLABLE JA + {0xC791, 0xC7AB, prLVT}, // Lo [27] HANGUL SYLLABLE JAG..HANGUL SYLLABLE JAH + {0xC7AC, 0xC7AC, prLV}, // Lo HANGUL SYLLABLE JAE + {0xC7AD, 0xC7C7, prLVT}, // Lo [27] HANGUL SYLLABLE JAEG..HANGUL SYLLABLE JAEH + {0xC7C8, 0xC7C8, prLV}, // Lo HANGUL SYLLABLE JYA + {0xC7C9, 0xC7E3, prLVT}, // Lo [27] HANGUL SYLLABLE JYAG..HANGUL SYLLABLE JYAH + {0xC7E4, 0xC7E4, prLV}, // Lo HANGUL SYLLABLE JYAE + {0xC7E5, 0xC7FF, prLVT}, // Lo [27] HANGUL SYLLABLE JYAEG..HANGUL SYLLABLE JYAEH + {0xC800, 0xC800, prLV}, // Lo HANGUL SYLLABLE JEO + {0xC801, 0xC81B, prLVT}, // Lo [27] HANGUL SYLLABLE JEOG..HANGUL SYLLABLE JEOH + {0xC81C, 0xC81C, prLV}, // Lo HANGUL SYLLABLE JE + {0xC81D, 0xC837, prLVT}, // Lo [27] HANGUL SYLLABLE JEG..HANGUL SYLLABLE JEH + {0xC838, 0xC838, prLV}, // Lo HANGUL SYLLABLE JYEO + {0xC839, 0xC853, prLVT}, // Lo [27] HANGUL SYLLABLE JYEOG..HANGUL SYLLABLE JYEOH + {0xC854, 0xC854, prLV}, // Lo HANGUL SYLLABLE JYE + {0xC855, 0xC86F, prLVT}, // Lo [27] HANGUL SYLLABLE JYEG..HANGUL SYLLABLE JYEH + {0xC870, 0xC870, prLV}, // Lo HANGUL SYLLABLE JO + {0xC871, 0xC88B, prLVT}, // Lo [27] HANGUL SYLLABLE JOG..HANGUL SYLLABLE JOH + {0xC88C, 0xC88C, prLV}, // Lo HANGUL SYLLABLE JWA + {0xC88D, 0xC8A7, prLVT}, // Lo [27] HANGUL SYLLABLE JWAG..HANGUL SYLLABLE JWAH + {0xC8A8, 0xC8A8, prLV}, // Lo HANGUL SYLLABLE JWAE + {0xC8A9, 0xC8C3, prLVT}, // Lo [27] HANGUL SYLLABLE JWAEG..HANGUL SYLLABLE JWAEH + {0xC8C4, 0xC8C4, prLV}, // Lo HANGUL SYLLABLE JOE + {0xC8C5, 0xC8DF, prLVT}, // Lo [27] HANGUL SYLLABLE JOEG..HANGUL 
SYLLABLE JOEH + {0xC8E0, 0xC8E0, prLV}, // Lo HANGUL SYLLABLE JYO + {0xC8E1, 0xC8FB, prLVT}, // Lo [27] HANGUL SYLLABLE JYOG..HANGUL SYLLABLE JYOH + {0xC8FC, 0xC8FC, prLV}, // Lo HANGUL SYLLABLE JU + {0xC8FD, 0xC917, prLVT}, // Lo [27] HANGUL SYLLABLE JUG..HANGUL SYLLABLE JUH + {0xC918, 0xC918, prLV}, // Lo HANGUL SYLLABLE JWEO + {0xC919, 0xC933, prLVT}, // Lo [27] HANGUL SYLLABLE JWEOG..HANGUL SYLLABLE JWEOH + {0xC934, 0xC934, prLV}, // Lo HANGUL SYLLABLE JWE + {0xC935, 0xC94F, prLVT}, // Lo [27] HANGUL SYLLABLE JWEG..HANGUL SYLLABLE JWEH + {0xC950, 0xC950, prLV}, // Lo HANGUL SYLLABLE JWI + {0xC951, 0xC96B, prLVT}, // Lo [27] HANGUL SYLLABLE JWIG..HANGUL SYLLABLE JWIH + {0xC96C, 0xC96C, prLV}, // Lo HANGUL SYLLABLE JYU + {0xC96D, 0xC987, prLVT}, // Lo [27] HANGUL SYLLABLE JYUG..HANGUL SYLLABLE JYUH + {0xC988, 0xC988, prLV}, // Lo HANGUL SYLLABLE JEU + {0xC989, 0xC9A3, prLVT}, // Lo [27] HANGUL SYLLABLE JEUG..HANGUL SYLLABLE JEUH + {0xC9A4, 0xC9A4, prLV}, // Lo HANGUL SYLLABLE JYI + {0xC9A5, 0xC9BF, prLVT}, // Lo [27] HANGUL SYLLABLE JYIG..HANGUL SYLLABLE JYIH + {0xC9C0, 0xC9C0, prLV}, // Lo HANGUL SYLLABLE JI + {0xC9C1, 0xC9DB, prLVT}, // Lo [27] HANGUL SYLLABLE JIG..HANGUL SYLLABLE JIH + {0xC9DC, 0xC9DC, prLV}, // Lo HANGUL SYLLABLE JJA + {0xC9DD, 0xC9F7, prLVT}, // Lo [27] HANGUL SYLLABLE JJAG..HANGUL SYLLABLE JJAH + {0xC9F8, 0xC9F8, prLV}, // Lo HANGUL SYLLABLE JJAE + {0xC9F9, 0xCA13, prLVT}, // Lo [27] HANGUL SYLLABLE JJAEG..HANGUL SYLLABLE JJAEH + {0xCA14, 0xCA14, prLV}, // Lo HANGUL SYLLABLE JJYA + {0xCA15, 0xCA2F, prLVT}, // Lo [27] HANGUL SYLLABLE JJYAG..HANGUL SYLLABLE JJYAH + {0xCA30, 0xCA30, prLV}, // Lo HANGUL SYLLABLE JJYAE + {0xCA31, 0xCA4B, prLVT}, // Lo [27] HANGUL SYLLABLE JJYAEG..HANGUL SYLLABLE JJYAEH + {0xCA4C, 0xCA4C, prLV}, // Lo HANGUL SYLLABLE JJEO + {0xCA4D, 0xCA67, prLVT}, // Lo [27] HANGUL SYLLABLE JJEOG..HANGUL SYLLABLE JJEOH + {0xCA68, 0xCA68, prLV}, // Lo HANGUL SYLLABLE JJE + {0xCA69, 0xCA83, prLVT}, // Lo [27] HANGUL SYLLABLE JJEG..HANGUL SYLLABLE JJEH + {0xCA84, 0xCA84, prLV}, // Lo HANGUL SYLLABLE JJYEO + {0xCA85, 0xCA9F, prLVT}, // Lo [27] HANGUL SYLLABLE JJYEOG..HANGUL SYLLABLE JJYEOH + {0xCAA0, 0xCAA0, prLV}, // Lo HANGUL SYLLABLE JJYE + {0xCAA1, 0xCABB, prLVT}, // Lo [27] HANGUL SYLLABLE JJYEG..HANGUL SYLLABLE JJYEH + {0xCABC, 0xCABC, prLV}, // Lo HANGUL SYLLABLE JJO + {0xCABD, 0xCAD7, prLVT}, // Lo [27] HANGUL SYLLABLE JJOG..HANGUL SYLLABLE JJOH + {0xCAD8, 0xCAD8, prLV}, // Lo HANGUL SYLLABLE JJWA + {0xCAD9, 0xCAF3, prLVT}, // Lo [27] HANGUL SYLLABLE JJWAG..HANGUL SYLLABLE JJWAH + {0xCAF4, 0xCAF4, prLV}, // Lo HANGUL SYLLABLE JJWAE + {0xCAF5, 0xCB0F, prLVT}, // Lo [27] HANGUL SYLLABLE JJWAEG..HANGUL SYLLABLE JJWAEH + {0xCB10, 0xCB10, prLV}, // Lo HANGUL SYLLABLE JJOE + {0xCB11, 0xCB2B, prLVT}, // Lo [27] HANGUL SYLLABLE JJOEG..HANGUL SYLLABLE JJOEH + {0xCB2C, 0xCB2C, prLV}, // Lo HANGUL SYLLABLE JJYO + {0xCB2D, 0xCB47, prLVT}, // Lo [27] HANGUL SYLLABLE JJYOG..HANGUL SYLLABLE JJYOH + {0xCB48, 0xCB48, prLV}, // Lo HANGUL SYLLABLE JJU + {0xCB49, 0xCB63, prLVT}, // Lo [27] HANGUL SYLLABLE JJUG..HANGUL SYLLABLE JJUH + {0xCB64, 0xCB64, prLV}, // Lo HANGUL SYLLABLE JJWEO + {0xCB65, 0xCB7F, prLVT}, // Lo [27] HANGUL SYLLABLE JJWEOG..HANGUL SYLLABLE JJWEOH + {0xCB80, 0xCB80, prLV}, // Lo HANGUL SYLLABLE JJWE + {0xCB81, 0xCB9B, prLVT}, // Lo [27] HANGUL SYLLABLE JJWEG..HANGUL SYLLABLE JJWEH + {0xCB9C, 0xCB9C, prLV}, // Lo HANGUL SYLLABLE JJWI + {0xCB9D, 0xCBB7, prLVT}, // Lo [27] HANGUL SYLLABLE JJWIG..HANGUL SYLLABLE JJWIH + {0xCBB8, 0xCBB8, prLV}, // Lo 
HANGUL SYLLABLE JJYU + {0xCBB9, 0xCBD3, prLVT}, // Lo [27] HANGUL SYLLABLE JJYUG..HANGUL SYLLABLE JJYUH + {0xCBD4, 0xCBD4, prLV}, // Lo HANGUL SYLLABLE JJEU + {0xCBD5, 0xCBEF, prLVT}, // Lo [27] HANGUL SYLLABLE JJEUG..HANGUL SYLLABLE JJEUH + {0xCBF0, 0xCBF0, prLV}, // Lo HANGUL SYLLABLE JJYI + {0xCBF1, 0xCC0B, prLVT}, // Lo [27] HANGUL SYLLABLE JJYIG..HANGUL SYLLABLE JJYIH + {0xCC0C, 0xCC0C, prLV}, // Lo HANGUL SYLLABLE JJI + {0xCC0D, 0xCC27, prLVT}, // Lo [27] HANGUL SYLLABLE JJIG..HANGUL SYLLABLE JJIH + {0xCC28, 0xCC28, prLV}, // Lo HANGUL SYLLABLE CA + {0xCC29, 0xCC43, prLVT}, // Lo [27] HANGUL SYLLABLE CAG..HANGUL SYLLABLE CAH + {0xCC44, 0xCC44, prLV}, // Lo HANGUL SYLLABLE CAE + {0xCC45, 0xCC5F, prLVT}, // Lo [27] HANGUL SYLLABLE CAEG..HANGUL SYLLABLE CAEH + {0xCC60, 0xCC60, prLV}, // Lo HANGUL SYLLABLE CYA + {0xCC61, 0xCC7B, prLVT}, // Lo [27] HANGUL SYLLABLE CYAG..HANGUL SYLLABLE CYAH + {0xCC7C, 0xCC7C, prLV}, // Lo HANGUL SYLLABLE CYAE + {0xCC7D, 0xCC97, prLVT}, // Lo [27] HANGUL SYLLABLE CYAEG..HANGUL SYLLABLE CYAEH + {0xCC98, 0xCC98, prLV}, // Lo HANGUL SYLLABLE CEO + {0xCC99, 0xCCB3, prLVT}, // Lo [27] HANGUL SYLLABLE CEOG..HANGUL SYLLABLE CEOH + {0xCCB4, 0xCCB4, prLV}, // Lo HANGUL SYLLABLE CE + {0xCCB5, 0xCCCF, prLVT}, // Lo [27] HANGUL SYLLABLE CEG..HANGUL SYLLABLE CEH + {0xCCD0, 0xCCD0, prLV}, // Lo HANGUL SYLLABLE CYEO + {0xCCD1, 0xCCEB, prLVT}, // Lo [27] HANGUL SYLLABLE CYEOG..HANGUL SYLLABLE CYEOH + {0xCCEC, 0xCCEC, prLV}, // Lo HANGUL SYLLABLE CYE + {0xCCED, 0xCD07, prLVT}, // Lo [27] HANGUL SYLLABLE CYEG..HANGUL SYLLABLE CYEH + {0xCD08, 0xCD08, prLV}, // Lo HANGUL SYLLABLE CO + {0xCD09, 0xCD23, prLVT}, // Lo [27] HANGUL SYLLABLE COG..HANGUL SYLLABLE COH + {0xCD24, 0xCD24, prLV}, // Lo HANGUL SYLLABLE CWA + {0xCD25, 0xCD3F, prLVT}, // Lo [27] HANGUL SYLLABLE CWAG..HANGUL SYLLABLE CWAH + {0xCD40, 0xCD40, prLV}, // Lo HANGUL SYLLABLE CWAE + {0xCD41, 0xCD5B, prLVT}, // Lo [27] HANGUL SYLLABLE CWAEG..HANGUL SYLLABLE CWAEH + {0xCD5C, 0xCD5C, prLV}, // Lo HANGUL SYLLABLE COE + {0xCD5D, 0xCD77, prLVT}, // Lo [27] HANGUL SYLLABLE COEG..HANGUL SYLLABLE COEH + {0xCD78, 0xCD78, prLV}, // Lo HANGUL SYLLABLE CYO + {0xCD79, 0xCD93, prLVT}, // Lo [27] HANGUL SYLLABLE CYOG..HANGUL SYLLABLE CYOH + {0xCD94, 0xCD94, prLV}, // Lo HANGUL SYLLABLE CU + {0xCD95, 0xCDAF, prLVT}, // Lo [27] HANGUL SYLLABLE CUG..HANGUL SYLLABLE CUH + {0xCDB0, 0xCDB0, prLV}, // Lo HANGUL SYLLABLE CWEO + {0xCDB1, 0xCDCB, prLVT}, // Lo [27] HANGUL SYLLABLE CWEOG..HANGUL SYLLABLE CWEOH + {0xCDCC, 0xCDCC, prLV}, // Lo HANGUL SYLLABLE CWE + {0xCDCD, 0xCDE7, prLVT}, // Lo [27] HANGUL SYLLABLE CWEG..HANGUL SYLLABLE CWEH + {0xCDE8, 0xCDE8, prLV}, // Lo HANGUL SYLLABLE CWI + {0xCDE9, 0xCE03, prLVT}, // Lo [27] HANGUL SYLLABLE CWIG..HANGUL SYLLABLE CWIH + {0xCE04, 0xCE04, prLV}, // Lo HANGUL SYLLABLE CYU + {0xCE05, 0xCE1F, prLVT}, // Lo [27] HANGUL SYLLABLE CYUG..HANGUL SYLLABLE CYUH + {0xCE20, 0xCE20, prLV}, // Lo HANGUL SYLLABLE CEU + {0xCE21, 0xCE3B, prLVT}, // Lo [27] HANGUL SYLLABLE CEUG..HANGUL SYLLABLE CEUH + {0xCE3C, 0xCE3C, prLV}, // Lo HANGUL SYLLABLE CYI + {0xCE3D, 0xCE57, prLVT}, // Lo [27] HANGUL SYLLABLE CYIG..HANGUL SYLLABLE CYIH + {0xCE58, 0xCE58, prLV}, // Lo HANGUL SYLLABLE CI + {0xCE59, 0xCE73, prLVT}, // Lo [27] HANGUL SYLLABLE CIG..HANGUL SYLLABLE CIH + {0xCE74, 0xCE74, prLV}, // Lo HANGUL SYLLABLE KA + {0xCE75, 0xCE8F, prLVT}, // Lo [27] HANGUL SYLLABLE KAG..HANGUL SYLLABLE KAH + {0xCE90, 0xCE90, prLV}, // Lo HANGUL SYLLABLE KAE + {0xCE91, 0xCEAB, prLVT}, // Lo [27] HANGUL SYLLABLE KAEG..HANGUL 
SYLLABLE KAEH + {0xCEAC, 0xCEAC, prLV}, // Lo HANGUL SYLLABLE KYA + {0xCEAD, 0xCEC7, prLVT}, // Lo [27] HANGUL SYLLABLE KYAG..HANGUL SYLLABLE KYAH + {0xCEC8, 0xCEC8, prLV}, // Lo HANGUL SYLLABLE KYAE + {0xCEC9, 0xCEE3, prLVT}, // Lo [27] HANGUL SYLLABLE KYAEG..HANGUL SYLLABLE KYAEH + {0xCEE4, 0xCEE4, prLV}, // Lo HANGUL SYLLABLE KEO + {0xCEE5, 0xCEFF, prLVT}, // Lo [27] HANGUL SYLLABLE KEOG..HANGUL SYLLABLE KEOH + {0xCF00, 0xCF00, prLV}, // Lo HANGUL SYLLABLE KE + {0xCF01, 0xCF1B, prLVT}, // Lo [27] HANGUL SYLLABLE KEG..HANGUL SYLLABLE KEH + {0xCF1C, 0xCF1C, prLV}, // Lo HANGUL SYLLABLE KYEO + {0xCF1D, 0xCF37, prLVT}, // Lo [27] HANGUL SYLLABLE KYEOG..HANGUL SYLLABLE KYEOH + {0xCF38, 0xCF38, prLV}, // Lo HANGUL SYLLABLE KYE + {0xCF39, 0xCF53, prLVT}, // Lo [27] HANGUL SYLLABLE KYEG..HANGUL SYLLABLE KYEH + {0xCF54, 0xCF54, prLV}, // Lo HANGUL SYLLABLE KO + {0xCF55, 0xCF6F, prLVT}, // Lo [27] HANGUL SYLLABLE KOG..HANGUL SYLLABLE KOH + {0xCF70, 0xCF70, prLV}, // Lo HANGUL SYLLABLE KWA + {0xCF71, 0xCF8B, prLVT}, // Lo [27] HANGUL SYLLABLE KWAG..HANGUL SYLLABLE KWAH + {0xCF8C, 0xCF8C, prLV}, // Lo HANGUL SYLLABLE KWAE + {0xCF8D, 0xCFA7, prLVT}, // Lo [27] HANGUL SYLLABLE KWAEG..HANGUL SYLLABLE KWAEH + {0xCFA8, 0xCFA8, prLV}, // Lo HANGUL SYLLABLE KOE + {0xCFA9, 0xCFC3, prLVT}, // Lo [27] HANGUL SYLLABLE KOEG..HANGUL SYLLABLE KOEH + {0xCFC4, 0xCFC4, prLV}, // Lo HANGUL SYLLABLE KYO + {0xCFC5, 0xCFDF, prLVT}, // Lo [27] HANGUL SYLLABLE KYOG..HANGUL SYLLABLE KYOH + {0xCFE0, 0xCFE0, prLV}, // Lo HANGUL SYLLABLE KU + {0xCFE1, 0xCFFB, prLVT}, // Lo [27] HANGUL SYLLABLE KUG..HANGUL SYLLABLE KUH + {0xCFFC, 0xCFFC, prLV}, // Lo HANGUL SYLLABLE KWEO + {0xCFFD, 0xD017, prLVT}, // Lo [27] HANGUL SYLLABLE KWEOG..HANGUL SYLLABLE KWEOH + {0xD018, 0xD018, prLV}, // Lo HANGUL SYLLABLE KWE + {0xD019, 0xD033, prLVT}, // Lo [27] HANGUL SYLLABLE KWEG..HANGUL SYLLABLE KWEH + {0xD034, 0xD034, prLV}, // Lo HANGUL SYLLABLE KWI + {0xD035, 0xD04F, prLVT}, // Lo [27] HANGUL SYLLABLE KWIG..HANGUL SYLLABLE KWIH + {0xD050, 0xD050, prLV}, // Lo HANGUL SYLLABLE KYU + {0xD051, 0xD06B, prLVT}, // Lo [27] HANGUL SYLLABLE KYUG..HANGUL SYLLABLE KYUH + {0xD06C, 0xD06C, prLV}, // Lo HANGUL SYLLABLE KEU + {0xD06D, 0xD087, prLVT}, // Lo [27] HANGUL SYLLABLE KEUG..HANGUL SYLLABLE KEUH + {0xD088, 0xD088, prLV}, // Lo HANGUL SYLLABLE KYI + {0xD089, 0xD0A3, prLVT}, // Lo [27] HANGUL SYLLABLE KYIG..HANGUL SYLLABLE KYIH + {0xD0A4, 0xD0A4, prLV}, // Lo HANGUL SYLLABLE KI + {0xD0A5, 0xD0BF, prLVT}, // Lo [27] HANGUL SYLLABLE KIG..HANGUL SYLLABLE KIH + {0xD0C0, 0xD0C0, prLV}, // Lo HANGUL SYLLABLE TA + {0xD0C1, 0xD0DB, prLVT}, // Lo [27] HANGUL SYLLABLE TAG..HANGUL SYLLABLE TAH + {0xD0DC, 0xD0DC, prLV}, // Lo HANGUL SYLLABLE TAE + {0xD0DD, 0xD0F7, prLVT}, // Lo [27] HANGUL SYLLABLE TAEG..HANGUL SYLLABLE TAEH + {0xD0F8, 0xD0F8, prLV}, // Lo HANGUL SYLLABLE TYA + {0xD0F9, 0xD113, prLVT}, // Lo [27] HANGUL SYLLABLE TYAG..HANGUL SYLLABLE TYAH + {0xD114, 0xD114, prLV}, // Lo HANGUL SYLLABLE TYAE + {0xD115, 0xD12F, prLVT}, // Lo [27] HANGUL SYLLABLE TYAEG..HANGUL SYLLABLE TYAEH + {0xD130, 0xD130, prLV}, // Lo HANGUL SYLLABLE TEO + {0xD131, 0xD14B, prLVT}, // Lo [27] HANGUL SYLLABLE TEOG..HANGUL SYLLABLE TEOH + {0xD14C, 0xD14C, prLV}, // Lo HANGUL SYLLABLE TE + {0xD14D, 0xD167, prLVT}, // Lo [27] HANGUL SYLLABLE TEG..HANGUL SYLLABLE TEH + {0xD168, 0xD168, prLV}, // Lo HANGUL SYLLABLE TYEO + {0xD169, 0xD183, prLVT}, // Lo [27] HANGUL SYLLABLE TYEOG..HANGUL SYLLABLE TYEOH + {0xD184, 0xD184, prLV}, // Lo HANGUL SYLLABLE TYE + {0xD185, 0xD19F, prLVT}, // 
Lo [27] HANGUL SYLLABLE TYEG..HANGUL SYLLABLE TYEH + {0xD1A0, 0xD1A0, prLV}, // Lo HANGUL SYLLABLE TO + {0xD1A1, 0xD1BB, prLVT}, // Lo [27] HANGUL SYLLABLE TOG..HANGUL SYLLABLE TOH + {0xD1BC, 0xD1BC, prLV}, // Lo HANGUL SYLLABLE TWA + {0xD1BD, 0xD1D7, prLVT}, // Lo [27] HANGUL SYLLABLE TWAG..HANGUL SYLLABLE TWAH + {0xD1D8, 0xD1D8, prLV}, // Lo HANGUL SYLLABLE TWAE + {0xD1D9, 0xD1F3, prLVT}, // Lo [27] HANGUL SYLLABLE TWAEG..HANGUL SYLLABLE TWAEH + {0xD1F4, 0xD1F4, prLV}, // Lo HANGUL SYLLABLE TOE + {0xD1F5, 0xD20F, prLVT}, // Lo [27] HANGUL SYLLABLE TOEG..HANGUL SYLLABLE TOEH + {0xD210, 0xD210, prLV}, // Lo HANGUL SYLLABLE TYO + {0xD211, 0xD22B, prLVT}, // Lo [27] HANGUL SYLLABLE TYOG..HANGUL SYLLABLE TYOH + {0xD22C, 0xD22C, prLV}, // Lo HANGUL SYLLABLE TU + {0xD22D, 0xD247, prLVT}, // Lo [27] HANGUL SYLLABLE TUG..HANGUL SYLLABLE TUH + {0xD248, 0xD248, prLV}, // Lo HANGUL SYLLABLE TWEO + {0xD249, 0xD263, prLVT}, // Lo [27] HANGUL SYLLABLE TWEOG..HANGUL SYLLABLE TWEOH + {0xD264, 0xD264, prLV}, // Lo HANGUL SYLLABLE TWE + {0xD265, 0xD27F, prLVT}, // Lo [27] HANGUL SYLLABLE TWEG..HANGUL SYLLABLE TWEH + {0xD280, 0xD280, prLV}, // Lo HANGUL SYLLABLE TWI + {0xD281, 0xD29B, prLVT}, // Lo [27] HANGUL SYLLABLE TWIG..HANGUL SYLLABLE TWIH + {0xD29C, 0xD29C, prLV}, // Lo HANGUL SYLLABLE TYU + {0xD29D, 0xD2B7, prLVT}, // Lo [27] HANGUL SYLLABLE TYUG..HANGUL SYLLABLE TYUH + {0xD2B8, 0xD2B8, prLV}, // Lo HANGUL SYLLABLE TEU + {0xD2B9, 0xD2D3, prLVT}, // Lo [27] HANGUL SYLLABLE TEUG..HANGUL SYLLABLE TEUH + {0xD2D4, 0xD2D4, prLV}, // Lo HANGUL SYLLABLE TYI + {0xD2D5, 0xD2EF, prLVT}, // Lo [27] HANGUL SYLLABLE TYIG..HANGUL SYLLABLE TYIH + {0xD2F0, 0xD2F0, prLV}, // Lo HANGUL SYLLABLE TI + {0xD2F1, 0xD30B, prLVT}, // Lo [27] HANGUL SYLLABLE TIG..HANGUL SYLLABLE TIH + {0xD30C, 0xD30C, prLV}, // Lo HANGUL SYLLABLE PA + {0xD30D, 0xD327, prLVT}, // Lo [27] HANGUL SYLLABLE PAG..HANGUL SYLLABLE PAH + {0xD328, 0xD328, prLV}, // Lo HANGUL SYLLABLE PAE + {0xD329, 0xD343, prLVT}, // Lo [27] HANGUL SYLLABLE PAEG..HANGUL SYLLABLE PAEH + {0xD344, 0xD344, prLV}, // Lo HANGUL SYLLABLE PYA + {0xD345, 0xD35F, prLVT}, // Lo [27] HANGUL SYLLABLE PYAG..HANGUL SYLLABLE PYAH + {0xD360, 0xD360, prLV}, // Lo HANGUL SYLLABLE PYAE + {0xD361, 0xD37B, prLVT}, // Lo [27] HANGUL SYLLABLE PYAEG..HANGUL SYLLABLE PYAEH + {0xD37C, 0xD37C, prLV}, // Lo HANGUL SYLLABLE PEO + {0xD37D, 0xD397, prLVT}, // Lo [27] HANGUL SYLLABLE PEOG..HANGUL SYLLABLE PEOH + {0xD398, 0xD398, prLV}, // Lo HANGUL SYLLABLE PE + {0xD399, 0xD3B3, prLVT}, // Lo [27] HANGUL SYLLABLE PEG..HANGUL SYLLABLE PEH + {0xD3B4, 0xD3B4, prLV}, // Lo HANGUL SYLLABLE PYEO + {0xD3B5, 0xD3CF, prLVT}, // Lo [27] HANGUL SYLLABLE PYEOG..HANGUL SYLLABLE PYEOH + {0xD3D0, 0xD3D0, prLV}, // Lo HANGUL SYLLABLE PYE + {0xD3D1, 0xD3EB, prLVT}, // Lo [27] HANGUL SYLLABLE PYEG..HANGUL SYLLABLE PYEH + {0xD3EC, 0xD3EC, prLV}, // Lo HANGUL SYLLABLE PO + {0xD3ED, 0xD407, prLVT}, // Lo [27] HANGUL SYLLABLE POG..HANGUL SYLLABLE POH + {0xD408, 0xD408, prLV}, // Lo HANGUL SYLLABLE PWA + {0xD409, 0xD423, prLVT}, // Lo [27] HANGUL SYLLABLE PWAG..HANGUL SYLLABLE PWAH + {0xD424, 0xD424, prLV}, // Lo HANGUL SYLLABLE PWAE + {0xD425, 0xD43F, prLVT}, // Lo [27] HANGUL SYLLABLE PWAEG..HANGUL SYLLABLE PWAEH + {0xD440, 0xD440, prLV}, // Lo HANGUL SYLLABLE POE + {0xD441, 0xD45B, prLVT}, // Lo [27] HANGUL SYLLABLE POEG..HANGUL SYLLABLE POEH + {0xD45C, 0xD45C, prLV}, // Lo HANGUL SYLLABLE PYO + {0xD45D, 0xD477, prLVT}, // Lo [27] HANGUL SYLLABLE PYOG..HANGUL SYLLABLE PYOH + {0xD478, 0xD478, prLV}, // Lo HANGUL SYLLABLE 
PU + {0xD479, 0xD493, prLVT}, // Lo [27] HANGUL SYLLABLE PUG..HANGUL SYLLABLE PUH + {0xD494, 0xD494, prLV}, // Lo HANGUL SYLLABLE PWEO + {0xD495, 0xD4AF, prLVT}, // Lo [27] HANGUL SYLLABLE PWEOG..HANGUL SYLLABLE PWEOH + {0xD4B0, 0xD4B0, prLV}, // Lo HANGUL SYLLABLE PWE + {0xD4B1, 0xD4CB, prLVT}, // Lo [27] HANGUL SYLLABLE PWEG..HANGUL SYLLABLE PWEH + {0xD4CC, 0xD4CC, prLV}, // Lo HANGUL SYLLABLE PWI + {0xD4CD, 0xD4E7, prLVT}, // Lo [27] HANGUL SYLLABLE PWIG..HANGUL SYLLABLE PWIH + {0xD4E8, 0xD4E8, prLV}, // Lo HANGUL SYLLABLE PYU + {0xD4E9, 0xD503, prLVT}, // Lo [27] HANGUL SYLLABLE PYUG..HANGUL SYLLABLE PYUH + {0xD504, 0xD504, prLV}, // Lo HANGUL SYLLABLE PEU + {0xD505, 0xD51F, prLVT}, // Lo [27] HANGUL SYLLABLE PEUG..HANGUL SYLLABLE PEUH + {0xD520, 0xD520, prLV}, // Lo HANGUL SYLLABLE PYI + {0xD521, 0xD53B, prLVT}, // Lo [27] HANGUL SYLLABLE PYIG..HANGUL SYLLABLE PYIH + {0xD53C, 0xD53C, prLV}, // Lo HANGUL SYLLABLE PI + {0xD53D, 0xD557, prLVT}, // Lo [27] HANGUL SYLLABLE PIG..HANGUL SYLLABLE PIH + {0xD558, 0xD558, prLV}, // Lo HANGUL SYLLABLE HA + {0xD559, 0xD573, prLVT}, // Lo [27] HANGUL SYLLABLE HAG..HANGUL SYLLABLE HAH + {0xD574, 0xD574, prLV}, // Lo HANGUL SYLLABLE HAE + {0xD575, 0xD58F, prLVT}, // Lo [27] HANGUL SYLLABLE HAEG..HANGUL SYLLABLE HAEH + {0xD590, 0xD590, prLV}, // Lo HANGUL SYLLABLE HYA + {0xD591, 0xD5AB, prLVT}, // Lo [27] HANGUL SYLLABLE HYAG..HANGUL SYLLABLE HYAH + {0xD5AC, 0xD5AC, prLV}, // Lo HANGUL SYLLABLE HYAE + {0xD5AD, 0xD5C7, prLVT}, // Lo [27] HANGUL SYLLABLE HYAEG..HANGUL SYLLABLE HYAEH + {0xD5C8, 0xD5C8, prLV}, // Lo HANGUL SYLLABLE HEO + {0xD5C9, 0xD5E3, prLVT}, // Lo [27] HANGUL SYLLABLE HEOG..HANGUL SYLLABLE HEOH + {0xD5E4, 0xD5E4, prLV}, // Lo HANGUL SYLLABLE HE + {0xD5E5, 0xD5FF, prLVT}, // Lo [27] HANGUL SYLLABLE HEG..HANGUL SYLLABLE HEH + {0xD600, 0xD600, prLV}, // Lo HANGUL SYLLABLE HYEO + {0xD601, 0xD61B, prLVT}, // Lo [27] HANGUL SYLLABLE HYEOG..HANGUL SYLLABLE HYEOH + {0xD61C, 0xD61C, prLV}, // Lo HANGUL SYLLABLE HYE + {0xD61D, 0xD637, prLVT}, // Lo [27] HANGUL SYLLABLE HYEG..HANGUL SYLLABLE HYEH + {0xD638, 0xD638, prLV}, // Lo HANGUL SYLLABLE HO + {0xD639, 0xD653, prLVT}, // Lo [27] HANGUL SYLLABLE HOG..HANGUL SYLLABLE HOH + {0xD654, 0xD654, prLV}, // Lo HANGUL SYLLABLE HWA + {0xD655, 0xD66F, prLVT}, // Lo [27] HANGUL SYLLABLE HWAG..HANGUL SYLLABLE HWAH + {0xD670, 0xD670, prLV}, // Lo HANGUL SYLLABLE HWAE + {0xD671, 0xD68B, prLVT}, // Lo [27] HANGUL SYLLABLE HWAEG..HANGUL SYLLABLE HWAEH + {0xD68C, 0xD68C, prLV}, // Lo HANGUL SYLLABLE HOE + {0xD68D, 0xD6A7, prLVT}, // Lo [27] HANGUL SYLLABLE HOEG..HANGUL SYLLABLE HOEH + {0xD6A8, 0xD6A8, prLV}, // Lo HANGUL SYLLABLE HYO + {0xD6A9, 0xD6C3, prLVT}, // Lo [27] HANGUL SYLLABLE HYOG..HANGUL SYLLABLE HYOH + {0xD6C4, 0xD6C4, prLV}, // Lo HANGUL SYLLABLE HU + {0xD6C5, 0xD6DF, prLVT}, // Lo [27] HANGUL SYLLABLE HUG..HANGUL SYLLABLE HUH + {0xD6E0, 0xD6E0, prLV}, // Lo HANGUL SYLLABLE HWEO + {0xD6E1, 0xD6FB, prLVT}, // Lo [27] HANGUL SYLLABLE HWEOG..HANGUL SYLLABLE HWEOH + {0xD6FC, 0xD6FC, prLV}, // Lo HANGUL SYLLABLE HWE + {0xD6FD, 0xD717, prLVT}, // Lo [27] HANGUL SYLLABLE HWEG..HANGUL SYLLABLE HWEH + {0xD718, 0xD718, prLV}, // Lo HANGUL SYLLABLE HWI + {0xD719, 0xD733, prLVT}, // Lo [27] HANGUL SYLLABLE HWIG..HANGUL SYLLABLE HWIH + {0xD734, 0xD734, prLV}, // Lo HANGUL SYLLABLE HYU + {0xD735, 0xD74F, prLVT}, // Lo [27] HANGUL SYLLABLE HYUG..HANGUL SYLLABLE HYUH + {0xD750, 0xD750, prLV}, // Lo HANGUL SYLLABLE HEU + {0xD751, 0xD76B, prLVT}, // Lo [27] HANGUL SYLLABLE HEUG..HANGUL SYLLABLE HEUH + {0xD76C, 
0xD76C, prLV}, // Lo HANGUL SYLLABLE HYI + {0xD76D, 0xD787, prLVT}, // Lo [27] HANGUL SYLLABLE HYIG..HANGUL SYLLABLE HYIH + {0xD788, 0xD788, prLV}, // Lo HANGUL SYLLABLE HI + {0xD789, 0xD7A3, prLVT}, // Lo [27] HANGUL SYLLABLE HIG..HANGUL SYLLABLE HIH + {0xD7B0, 0xD7C6, prV}, // Lo [23] HANGUL JUNGSEONG O-YEO..HANGUL JUNGSEONG ARAEA-E + {0xD7CB, 0xD7FB, prT}, // Lo [49] HANGUL JONGSEONG NIEUN-RIEUL..HANGUL JONGSEONG PHIEUPH-THIEUTH + {0xFB1E, 0xFB1E, prExtend}, // Mn HEBREW POINT JUDEO-SPANISH VARIKA + {0xFE00, 0xFE0F, prExtend}, // Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16 + {0xFE20, 0xFE2F, prExtend}, // Mn [16] COMBINING LIGATURE LEFT HALF..COMBINING CYRILLIC TITLO RIGHT HALF + {0xFEFF, 0xFEFF, prControl}, // Cf ZERO WIDTH NO-BREAK SPACE + {0xFF9E, 0xFF9F, prExtend}, // Lm [2] HALFWIDTH KATAKANA VOICED SOUND MARK..HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK + {0xFFF0, 0xFFF8, prControl}, // Cn [9] .. + {0xFFF9, 0xFFFB, prControl}, // Cf [3] INTERLINEAR ANNOTATION ANCHOR..INTERLINEAR ANNOTATION TERMINATOR + {0x101FD, 0x101FD, prExtend}, // Mn PHAISTOS DISC SIGN COMBINING OBLIQUE STROKE + {0x102E0, 0x102E0, prExtend}, // Mn COPTIC EPACT THOUSANDS MARK + {0x10376, 0x1037A, prExtend}, // Mn [5] COMBINING OLD PERMIC LETTER AN..COMBINING OLD PERMIC LETTER SII + {0x10A01, 0x10A03, prExtend}, // Mn [3] KHAROSHTHI VOWEL SIGN I..KHAROSHTHI VOWEL SIGN VOCALIC R + {0x10A05, 0x10A06, prExtend}, // Mn [2] KHAROSHTHI VOWEL SIGN E..KHAROSHTHI VOWEL SIGN O + {0x10A0C, 0x10A0F, prExtend}, // Mn [4] KHAROSHTHI VOWEL LENGTH MARK..KHAROSHTHI SIGN VISARGA + {0x10A38, 0x10A3A, prExtend}, // Mn [3] KHAROSHTHI SIGN BAR ABOVE..KHAROSHTHI SIGN DOT BELOW + {0x10A3F, 0x10A3F, prExtend}, // Mn KHAROSHTHI VIRAMA + {0x10AE5, 0x10AE6, prExtend}, // Mn [2] MANICHAEAN ABBREVIATION MARK ABOVE..MANICHAEAN ABBREVIATION MARK BELOW + {0x10D24, 0x10D27, prExtend}, // Mn [4] HANIFI ROHINGYA SIGN HARBAHAY..HANIFI ROHINGYA SIGN TASSI + {0x10F46, 0x10F50, prExtend}, // Mn [11] SOGDIAN COMBINING DOT BELOW..SOGDIAN COMBINING STROKE BELOW + {0x11000, 0x11000, prSpacingMark}, // Mc BRAHMI SIGN CANDRABINDU + {0x11001, 0x11001, prExtend}, // Mn BRAHMI SIGN ANUSVARA + {0x11002, 0x11002, prSpacingMark}, // Mc BRAHMI SIGN VISARGA + {0x11038, 0x11046, prExtend}, // Mn [15] BRAHMI VOWEL SIGN AA..BRAHMI VIRAMA + {0x1107F, 0x11081, prExtend}, // Mn [3] BRAHMI NUMBER JOINER..KAITHI SIGN ANUSVARA + {0x11082, 0x11082, prSpacingMark}, // Mc KAITHI SIGN VISARGA + {0x110B0, 0x110B2, prSpacingMark}, // Mc [3] KAITHI VOWEL SIGN AA..KAITHI VOWEL SIGN II + {0x110B3, 0x110B6, prExtend}, // Mn [4] KAITHI VOWEL SIGN U..KAITHI VOWEL SIGN AI + {0x110B7, 0x110B8, prSpacingMark}, // Mc [2] KAITHI VOWEL SIGN O..KAITHI VOWEL SIGN AU + {0x110B9, 0x110BA, prExtend}, // Mn [2] KAITHI SIGN VIRAMA..KAITHI SIGN NUKTA + {0x110BD, 0x110BD, prPreprend}, // Cf KAITHI NUMBER SIGN + {0x110CD, 0x110CD, prPreprend}, // Cf KAITHI NUMBER SIGN ABOVE + {0x11100, 0x11102, prExtend}, // Mn [3] CHAKMA SIGN CANDRABINDU..CHAKMA SIGN VISARGA + {0x11127, 0x1112B, prExtend}, // Mn [5] CHAKMA VOWEL SIGN A..CHAKMA VOWEL SIGN UU + {0x1112C, 0x1112C, prSpacingMark}, // Mc CHAKMA VOWEL SIGN E + {0x1112D, 0x11134, prExtend}, // Mn [8] CHAKMA VOWEL SIGN AI..CHAKMA MAAYYAA + {0x11145, 0x11146, prSpacingMark}, // Mc [2] CHAKMA VOWEL SIGN AA..CHAKMA VOWEL SIGN EI + {0x11173, 0x11173, prExtend}, // Mn MAHAJANI SIGN NUKTA + {0x11180, 0x11181, prExtend}, // Mn [2] SHARADA SIGN CANDRABINDU..SHARADA SIGN ANUSVARA + {0x11182, 0x11182, prSpacingMark}, // Mc SHARADA SIGN VISARGA + {0x111B3, 
0x111B5, prSpacingMark}, // Mc [3] SHARADA VOWEL SIGN AA..SHARADA VOWEL SIGN II + {0x111B6, 0x111BE, prExtend}, // Mn [9] SHARADA VOWEL SIGN U..SHARADA VOWEL SIGN O + {0x111BF, 0x111C0, prSpacingMark}, // Mc [2] SHARADA VOWEL SIGN AU..SHARADA SIGN VIRAMA + {0x111C2, 0x111C3, prPreprend}, // Lo [2] SHARADA SIGN JIHVAMULIYA..SHARADA SIGN UPADHMANIYA + {0x111C9, 0x111CC, prExtend}, // Mn [4] SHARADA SANDHI MARK..SHARADA EXTRA SHORT VOWEL MARK + {0x1122C, 0x1122E, prSpacingMark}, // Mc [3] KHOJKI VOWEL SIGN AA..KHOJKI VOWEL SIGN II + {0x1122F, 0x11231, prExtend}, // Mn [3] KHOJKI VOWEL SIGN U..KHOJKI VOWEL SIGN AI + {0x11232, 0x11233, prSpacingMark}, // Mc [2] KHOJKI VOWEL SIGN O..KHOJKI VOWEL SIGN AU + {0x11234, 0x11234, prExtend}, // Mn KHOJKI SIGN ANUSVARA + {0x11235, 0x11235, prSpacingMark}, // Mc KHOJKI SIGN VIRAMA + {0x11236, 0x11237, prExtend}, // Mn [2] KHOJKI SIGN NUKTA..KHOJKI SIGN SHADDA + {0x1123E, 0x1123E, prExtend}, // Mn KHOJKI SIGN SUKUN + {0x112DF, 0x112DF, prExtend}, // Mn KHUDAWADI SIGN ANUSVARA + {0x112E0, 0x112E2, prSpacingMark}, // Mc [3] KHUDAWADI VOWEL SIGN AA..KHUDAWADI VOWEL SIGN II + {0x112E3, 0x112EA, prExtend}, // Mn [8] KHUDAWADI VOWEL SIGN U..KHUDAWADI SIGN VIRAMA + {0x11300, 0x11301, prExtend}, // Mn [2] GRANTHA SIGN COMBINING ANUSVARA ABOVE..GRANTHA SIGN CANDRABINDU + {0x11302, 0x11303, prSpacingMark}, // Mc [2] GRANTHA SIGN ANUSVARA..GRANTHA SIGN VISARGA + {0x1133B, 0x1133C, prExtend}, // Mn [2] COMBINING BINDU BELOW..GRANTHA SIGN NUKTA + {0x1133E, 0x1133E, prExtend}, // Mc GRANTHA VOWEL SIGN AA + {0x1133F, 0x1133F, prSpacingMark}, // Mc GRANTHA VOWEL SIGN I + {0x11340, 0x11340, prExtend}, // Mn GRANTHA VOWEL SIGN II + {0x11341, 0x11344, prSpacingMark}, // Mc [4] GRANTHA VOWEL SIGN U..GRANTHA VOWEL SIGN VOCALIC RR + {0x11347, 0x11348, prSpacingMark}, // Mc [2] GRANTHA VOWEL SIGN EE..GRANTHA VOWEL SIGN AI + {0x1134B, 0x1134D, prSpacingMark}, // Mc [3] GRANTHA VOWEL SIGN OO..GRANTHA SIGN VIRAMA + {0x11357, 0x11357, prExtend}, // Mc GRANTHA AU LENGTH MARK + {0x11362, 0x11363, prSpacingMark}, // Mc [2] GRANTHA VOWEL SIGN VOCALIC L..GRANTHA VOWEL SIGN VOCALIC LL + {0x11366, 0x1136C, prExtend}, // Mn [7] COMBINING GRANTHA DIGIT ZERO..COMBINING GRANTHA DIGIT SIX + {0x11370, 0x11374, prExtend}, // Mn [5] COMBINING GRANTHA LETTER A..COMBINING GRANTHA LETTER PA + {0x11435, 0x11437, prSpacingMark}, // Mc [3] NEWA VOWEL SIGN AA..NEWA VOWEL SIGN II + {0x11438, 0x1143F, prExtend}, // Mn [8] NEWA VOWEL SIGN U..NEWA VOWEL SIGN AI + {0x11440, 0x11441, prSpacingMark}, // Mc [2] NEWA VOWEL SIGN O..NEWA VOWEL SIGN AU + {0x11442, 0x11444, prExtend}, // Mn [3] NEWA SIGN VIRAMA..NEWA SIGN ANUSVARA + {0x11445, 0x11445, prSpacingMark}, // Mc NEWA SIGN VISARGA + {0x11446, 0x11446, prExtend}, // Mn NEWA SIGN NUKTA + {0x1145E, 0x1145E, prExtend}, // Mn NEWA SANDHI MARK + {0x114B0, 0x114B0, prExtend}, // Mc TIRHUTA VOWEL SIGN AA + {0x114B1, 0x114B2, prSpacingMark}, // Mc [2] TIRHUTA VOWEL SIGN I..TIRHUTA VOWEL SIGN II + {0x114B3, 0x114B8, prExtend}, // Mn [6] TIRHUTA VOWEL SIGN U..TIRHUTA VOWEL SIGN VOCALIC LL + {0x114B9, 0x114B9, prSpacingMark}, // Mc TIRHUTA VOWEL SIGN E + {0x114BA, 0x114BA, prExtend}, // Mn TIRHUTA VOWEL SIGN SHORT E + {0x114BB, 0x114BC, prSpacingMark}, // Mc [2] TIRHUTA VOWEL SIGN AI..TIRHUTA VOWEL SIGN O + {0x114BD, 0x114BD, prExtend}, // Mc TIRHUTA VOWEL SIGN SHORT O + {0x114BE, 0x114BE, prSpacingMark}, // Mc TIRHUTA VOWEL SIGN AU + {0x114BF, 0x114C0, prExtend}, // Mn [2] TIRHUTA SIGN CANDRABINDU..TIRHUTA SIGN ANUSVARA + {0x114C1, 0x114C1, prSpacingMark}, // Mc 
TIRHUTA SIGN VISARGA + {0x114C2, 0x114C3, prExtend}, // Mn [2] TIRHUTA SIGN VIRAMA..TIRHUTA SIGN NUKTA + {0x115AF, 0x115AF, prExtend}, // Mc SIDDHAM VOWEL SIGN AA + {0x115B0, 0x115B1, prSpacingMark}, // Mc [2] SIDDHAM VOWEL SIGN I..SIDDHAM VOWEL SIGN II + {0x115B2, 0x115B5, prExtend}, // Mn [4] SIDDHAM VOWEL SIGN U..SIDDHAM VOWEL SIGN VOCALIC RR + {0x115B8, 0x115BB, prSpacingMark}, // Mc [4] SIDDHAM VOWEL SIGN E..SIDDHAM VOWEL SIGN AU + {0x115BC, 0x115BD, prExtend}, // Mn [2] SIDDHAM SIGN CANDRABINDU..SIDDHAM SIGN ANUSVARA + {0x115BE, 0x115BE, prSpacingMark}, // Mc SIDDHAM SIGN VISARGA + {0x115BF, 0x115C0, prExtend}, // Mn [2] SIDDHAM SIGN VIRAMA..SIDDHAM SIGN NUKTA + {0x115DC, 0x115DD, prExtend}, // Mn [2] SIDDHAM VOWEL SIGN ALTERNATE U..SIDDHAM VOWEL SIGN ALTERNATE UU + {0x11630, 0x11632, prSpacingMark}, // Mc [3] MODI VOWEL SIGN AA..MODI VOWEL SIGN II + {0x11633, 0x1163A, prExtend}, // Mn [8] MODI VOWEL SIGN U..MODI VOWEL SIGN AI + {0x1163B, 0x1163C, prSpacingMark}, // Mc [2] MODI VOWEL SIGN O..MODI VOWEL SIGN AU + {0x1163D, 0x1163D, prExtend}, // Mn MODI SIGN ANUSVARA + {0x1163E, 0x1163E, prSpacingMark}, // Mc MODI SIGN VISARGA + {0x1163F, 0x11640, prExtend}, // Mn [2] MODI SIGN VIRAMA..MODI SIGN ARDHACANDRA + {0x116AB, 0x116AB, prExtend}, // Mn TAKRI SIGN ANUSVARA + {0x116AC, 0x116AC, prSpacingMark}, // Mc TAKRI SIGN VISARGA + {0x116AD, 0x116AD, prExtend}, // Mn TAKRI VOWEL SIGN AA + {0x116AE, 0x116AF, prSpacingMark}, // Mc [2] TAKRI VOWEL SIGN I..TAKRI VOWEL SIGN II + {0x116B0, 0x116B5, prExtend}, // Mn [6] TAKRI VOWEL SIGN U..TAKRI VOWEL SIGN AU + {0x116B6, 0x116B6, prSpacingMark}, // Mc TAKRI SIGN VIRAMA + {0x116B7, 0x116B7, prExtend}, // Mn TAKRI SIGN NUKTA + {0x1171D, 0x1171F, prExtend}, // Mn [3] AHOM CONSONANT SIGN MEDIAL LA..AHOM CONSONANT SIGN MEDIAL LIGATING RA + {0x11720, 0x11721, prSpacingMark}, // Mc [2] AHOM VOWEL SIGN A..AHOM VOWEL SIGN AA + {0x11722, 0x11725, prExtend}, // Mn [4] AHOM VOWEL SIGN I..AHOM VOWEL SIGN UU + {0x11726, 0x11726, prSpacingMark}, // Mc AHOM VOWEL SIGN E + {0x11727, 0x1172B, prExtend}, // Mn [5] AHOM VOWEL SIGN AW..AHOM SIGN KILLER + {0x1182C, 0x1182E, prSpacingMark}, // Mc [3] DOGRA VOWEL SIGN AA..DOGRA VOWEL SIGN II + {0x1182F, 0x11837, prExtend}, // Mn [9] DOGRA VOWEL SIGN U..DOGRA SIGN ANUSVARA + {0x11838, 0x11838, prSpacingMark}, // Mc DOGRA SIGN VISARGA + {0x11839, 0x1183A, prExtend}, // Mn [2] DOGRA SIGN VIRAMA..DOGRA SIGN NUKTA + {0x119D1, 0x119D3, prSpacingMark}, // Mc [3] NANDINAGARI VOWEL SIGN AA..NANDINAGARI VOWEL SIGN II + {0x119D4, 0x119D7, prExtend}, // Mn [4] NANDINAGARI VOWEL SIGN U..NANDINAGARI VOWEL SIGN VOCALIC RR + {0x119DA, 0x119DB, prExtend}, // Mn [2] NANDINAGARI VOWEL SIGN E..NANDINAGARI VOWEL SIGN AI + {0x119DC, 0x119DF, prSpacingMark}, // Mc [4] NANDINAGARI VOWEL SIGN O..NANDINAGARI SIGN VISARGA + {0x119E0, 0x119E0, prExtend}, // Mn NANDINAGARI SIGN VIRAMA + {0x119E4, 0x119E4, prSpacingMark}, // Mc NANDINAGARI VOWEL SIGN PRISHTHAMATRA E + {0x11A01, 0x11A0A, prExtend}, // Mn [10] ZANABAZAR SQUARE VOWEL SIGN I..ZANABAZAR SQUARE VOWEL LENGTH MARK + {0x11A33, 0x11A38, prExtend}, // Mn [6] ZANABAZAR SQUARE FINAL CONSONANT MARK..ZANABAZAR SQUARE SIGN ANUSVARA + {0x11A39, 0x11A39, prSpacingMark}, // Mc ZANABAZAR SQUARE SIGN VISARGA + {0x11A3A, 0x11A3A, prPreprend}, // Lo ZANABAZAR SQUARE CLUSTER-INITIAL LETTER RA + {0x11A3B, 0x11A3E, prExtend}, // Mn [4] ZANABAZAR SQUARE CLUSTER-FINAL LETTER YA..ZANABAZAR SQUARE CLUSTER-FINAL LETTER VA + {0x11A47, 0x11A47, prExtend}, // Mn ZANABAZAR SQUARE SUBJOINER + {0x11A51, 0x11A56, 
prExtend}, // Mn [6] SOYOMBO VOWEL SIGN I..SOYOMBO VOWEL SIGN OE + {0x11A57, 0x11A58, prSpacingMark}, // Mc [2] SOYOMBO VOWEL SIGN AI..SOYOMBO VOWEL SIGN AU + {0x11A59, 0x11A5B, prExtend}, // Mn [3] SOYOMBO VOWEL SIGN VOCALIC R..SOYOMBO VOWEL LENGTH MARK + {0x11A84, 0x11A89, prPreprend}, // Lo [6] SOYOMBO SIGN JIHVAMULIYA..SOYOMBO CLUSTER-INITIAL LETTER SA + {0x11A8A, 0x11A96, prExtend}, // Mn [13] SOYOMBO FINAL CONSONANT SIGN G..SOYOMBO SIGN ANUSVARA + {0x11A97, 0x11A97, prSpacingMark}, // Mc SOYOMBO SIGN VISARGA + {0x11A98, 0x11A99, prExtend}, // Mn [2] SOYOMBO GEMINATION MARK..SOYOMBO SUBJOINER + {0x11C2F, 0x11C2F, prSpacingMark}, // Mc BHAIKSUKI VOWEL SIGN AA + {0x11C30, 0x11C36, prExtend}, // Mn [7] BHAIKSUKI VOWEL SIGN I..BHAIKSUKI VOWEL SIGN VOCALIC L + {0x11C38, 0x11C3D, prExtend}, // Mn [6] BHAIKSUKI VOWEL SIGN E..BHAIKSUKI SIGN ANUSVARA + {0x11C3E, 0x11C3E, prSpacingMark}, // Mc BHAIKSUKI SIGN VISARGA + {0x11C3F, 0x11C3F, prExtend}, // Mn BHAIKSUKI SIGN VIRAMA + {0x11C92, 0x11CA7, prExtend}, // Mn [22] MARCHEN SUBJOINED LETTER KA..MARCHEN SUBJOINED LETTER ZA + {0x11CA9, 0x11CA9, prSpacingMark}, // Mc MARCHEN SUBJOINED LETTER YA + {0x11CAA, 0x11CB0, prExtend}, // Mn [7] MARCHEN SUBJOINED LETTER RA..MARCHEN VOWEL SIGN AA + {0x11CB1, 0x11CB1, prSpacingMark}, // Mc MARCHEN VOWEL SIGN I + {0x11CB2, 0x11CB3, prExtend}, // Mn [2] MARCHEN VOWEL SIGN U..MARCHEN VOWEL SIGN E + {0x11CB4, 0x11CB4, prSpacingMark}, // Mc MARCHEN VOWEL SIGN O + {0x11CB5, 0x11CB6, prExtend}, // Mn [2] MARCHEN SIGN ANUSVARA..MARCHEN SIGN CANDRABINDU + {0x11D31, 0x11D36, prExtend}, // Mn [6] MASARAM GONDI VOWEL SIGN AA..MASARAM GONDI VOWEL SIGN VOCALIC R + {0x11D3A, 0x11D3A, prExtend}, // Mn MASARAM GONDI VOWEL SIGN E + {0x11D3C, 0x11D3D, prExtend}, // Mn [2] MASARAM GONDI VOWEL SIGN AI..MASARAM GONDI VOWEL SIGN O + {0x11D3F, 0x11D45, prExtend}, // Mn [7] MASARAM GONDI VOWEL SIGN AU..MASARAM GONDI VIRAMA + {0x11D46, 0x11D46, prPreprend}, // Lo MASARAM GONDI REPHA + {0x11D47, 0x11D47, prExtend}, // Mn MASARAM GONDI RA-KARA + {0x11D8A, 0x11D8E, prSpacingMark}, // Mc [5] GUNJALA GONDI VOWEL SIGN AA..GUNJALA GONDI VOWEL SIGN UU + {0x11D90, 0x11D91, prExtend}, // Mn [2] GUNJALA GONDI VOWEL SIGN EE..GUNJALA GONDI VOWEL SIGN AI + {0x11D93, 0x11D94, prSpacingMark}, // Mc [2] GUNJALA GONDI VOWEL SIGN OO..GUNJALA GONDI VOWEL SIGN AU + {0x11D95, 0x11D95, prExtend}, // Mn GUNJALA GONDI SIGN ANUSVARA + {0x11D96, 0x11D96, prSpacingMark}, // Mc GUNJALA GONDI SIGN VISARGA + {0x11D97, 0x11D97, prExtend}, // Mn GUNJALA GONDI VIRAMA + {0x11EF3, 0x11EF4, prExtend}, // Mn [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U + {0x11EF5, 0x11EF6, prSpacingMark}, // Mc [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O + {0x13430, 0x13438, prControl}, // Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT + {0x16AF0, 0x16AF4, prExtend}, // Mn [5] BASSA VAH COMBINING HIGH TONE..BASSA VAH COMBINING HIGH-LOW TONE + {0x16B30, 0x16B36, prExtend}, // Mn [7] PAHAWH HMONG MARK CIM TUB..PAHAWH HMONG MARK CIM TAUM + {0x16F4F, 0x16F4F, prExtend}, // Mn MIAO SIGN CONSONANT MODIFIER BAR + {0x16F51, 0x16F87, prSpacingMark}, // Mc [55] MIAO SIGN ASPIRATION..MIAO VOWEL SIGN UI + {0x16F8F, 0x16F92, prExtend}, // Mn [4] MIAO TONE RIGHT..MIAO TONE BELOW + {0x1BC9D, 0x1BC9E, prExtend}, // Mn [2] DUPLOYAN THICK LETTER SELECTOR..DUPLOYAN DOUBLE MARK + {0x1BCA0, 0x1BCA3, prControl}, // Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP + {0x1D165, 0x1D165, prExtend}, // Mc MUSICAL SYMBOL COMBINING STEM + {0x1D166, 0x1D166, 
prSpacingMark}, // Mc MUSICAL SYMBOL COMBINING SPRECHGESANG STEM + {0x1D167, 0x1D169, prExtend}, // Mn [3] MUSICAL SYMBOL COMBINING TREMOLO-1..MUSICAL SYMBOL COMBINING TREMOLO-3 + {0x1D16D, 0x1D16D, prSpacingMark}, // Mc MUSICAL SYMBOL COMBINING AUGMENTATION DOT + {0x1D16E, 0x1D172, prExtend}, // Mc [5] MUSICAL SYMBOL COMBINING FLAG-1..MUSICAL SYMBOL COMBINING FLAG-5 + {0x1D173, 0x1D17A, prControl}, // Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE + {0x1D17B, 0x1D182, prExtend}, // Mn [8] MUSICAL SYMBOL COMBINING ACCENT..MUSICAL SYMBOL COMBINING LOURE + {0x1D185, 0x1D18B, prExtend}, // Mn [7] MUSICAL SYMBOL COMBINING DOIT..MUSICAL SYMBOL COMBINING TRIPLE TONGUE + {0x1D1AA, 0x1D1AD, prExtend}, // Mn [4] MUSICAL SYMBOL COMBINING DOWN BOW..MUSICAL SYMBOL COMBINING SNAP PIZZICATO + {0x1D242, 0x1D244, prExtend}, // Mn [3] COMBINING GREEK MUSICAL TRISEME..COMBINING GREEK MUSICAL PENTASEME + {0x1DA00, 0x1DA36, prExtend}, // Mn [55] SIGNWRITING HEAD RIM..SIGNWRITING AIR SUCKING IN + {0x1DA3B, 0x1DA6C, prExtend}, // Mn [50] SIGNWRITING MOUTH CLOSED NEUTRAL..SIGNWRITING EXCITEMENT + {0x1DA75, 0x1DA75, prExtend}, // Mn SIGNWRITING UPPER BODY TILTING FROM HIP JOINTS + {0x1DA84, 0x1DA84, prExtend}, // Mn SIGNWRITING LOCATION HEAD NECK + {0x1DA9B, 0x1DA9F, prExtend}, // Mn [5] SIGNWRITING FILL MODIFIER-2..SIGNWRITING FILL MODIFIER-6 + {0x1DAA1, 0x1DAAF, prExtend}, // Mn [15] SIGNWRITING ROTATION MODIFIER-2..SIGNWRITING ROTATION MODIFIER-16 + {0x1E000, 0x1E006, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE + {0x1E008, 0x1E018, prExtend}, // Mn [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU + {0x1E01B, 0x1E021, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI + {0x1E023, 0x1E024, prExtend}, // Mn [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS + {0x1E026, 0x1E02A, prExtend}, // Mn [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA + {0x1E130, 0x1E136, prExtend}, // Mn [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D + {0x1E2EC, 0x1E2EF, prExtend}, // Mn [4] WANCHO TONE TUP..WANCHO TONE KOINI + {0x1E8D0, 0x1E8D6, prExtend}, // Mn [7] MENDE KIKAKUI COMBINING NUMBER TEENS..MENDE KIKAKUI COMBINING NUMBER MILLIONS + {0x1E944, 0x1E94A, prExtend}, // Mn [7] ADLAM ALIF LENGTHENER..ADLAM NUKTA + {0x1F000, 0x1F02B, prExtendedPictographic}, // 5.1 [44] (🀀..🀫) MAHJONG TILE EAST WIND..MAHJONG TILE BACK + {0x1F02C, 0x1F02F, prExtendedPictographic}, // NA [4] (🀬..🀯) .. + {0x1F030, 0x1F093, prExtendedPictographic}, // 5.1[100] (🀰..🂓) DOMINO TILE HORIZONTAL BACK..DOMINO TILE VERTICAL-06-06 + {0x1F094, 0x1F09F, prExtendedPictographic}, // NA [12] (🂔..🂟) .. + {0x1F0A0, 0x1F0AE, prExtendedPictographic}, // 6.0 [15] (🂠..🂮) PLAYING CARD BACK..PLAYING CARD KING OF SPADES + {0x1F0AF, 0x1F0B0, prExtendedPictographic}, // NA [2] (🂯..🂰) .. 
+ {0x1F0B1, 0x1F0BE, prExtendedPictographic}, // 6.0 [14] (🂱..🂾) PLAYING CARD ACE OF HEARTS..PLAYING CARD KING OF HEARTS + {0x1F0BF, 0x1F0BF, prExtendedPictographic}, // 7.0 [1] (🂿) PLAYING CARD RED JOKER + {0x1F0C0, 0x1F0C0, prExtendedPictographic}, // NA [1] (🃀) + {0x1F0C1, 0x1F0CF, prExtendedPictographic}, // 6.0 [15] (🃁..🃏) PLAYING CARD ACE OF DIAMONDS..joker + {0x1F0D0, 0x1F0D0, prExtendedPictographic}, // NA [1] (🃐) + {0x1F0D1, 0x1F0DF, prExtendedPictographic}, // 6.0 [15] (🃑..🃟) PLAYING CARD ACE OF CLUBS..PLAYING CARD WHITE JOKER + {0x1F0E0, 0x1F0F5, prExtendedPictographic}, // 7.0 [22] (🃠..🃵) PLAYING CARD FOOL..PLAYING CARD TRUMP-21 + {0x1F0F6, 0x1F0FF, prExtendedPictographic}, // NA [10] (🃶..🃿) .. + {0x1F10D, 0x1F10F, prExtendedPictographic}, // NA [3] (🄍..🄏) .. + {0x1F12F, 0x1F12F, prExtendedPictographic}, // 11.0 [1] (🄯) COPYLEFT SYMBOL + {0x1F16C, 0x1F16C, prExtendedPictographic}, // 12.0 [1] (🅬) RAISED MR SIGN + {0x1F16D, 0x1F16F, prExtendedPictographic}, // NA [3] (🅭..🅯) .. + {0x1F170, 0x1F171, prExtendedPictographic}, // 6.0 [2] (🅰️..🅱️) A button (blood type)..B button (blood type) + {0x1F17E, 0x1F17E, prExtendedPictographic}, // 6.0 [1] (🅾️) O button (blood type) + {0x1F17F, 0x1F17F, prExtendedPictographic}, // 5.2 [1] (🅿️) P button + {0x1F18E, 0x1F18E, prExtendedPictographic}, // 6.0 [1] (🆎) AB button (blood type) + {0x1F191, 0x1F19A, prExtendedPictographic}, // 6.0 [10] (🆑..🆚) CL button..VS button + {0x1F1AD, 0x1F1E5, prExtendedPictographic}, // NA [57] (🆭..🇥) .. + {0x1F1E6, 0x1F1FF, prRegionalIndicator}, // So [26] REGIONAL INDICATOR SYMBOL LETTER A..REGIONAL INDICATOR SYMBOL LETTER Z + {0x1F201, 0x1F202, prExtendedPictographic}, // 6.0 [2] (🈁..🈂️) Japanese “here” button..Japanese “service charge” button + {0x1F203, 0x1F20F, prExtendedPictographic}, // NA [13] (🈃..🈏) .. + {0x1F21A, 0x1F21A, prExtendedPictographic}, // 5.2 [1] (🈚) Japanese “free of charge” button + {0x1F22F, 0x1F22F, prExtendedPictographic}, // 5.2 [1] (🈯) Japanese “reserved” button + {0x1F232, 0x1F23A, prExtendedPictographic}, // 6.0 [9] (🈲..🈺) Japanese “prohibited” button..Japanese “open for business” button + {0x1F23C, 0x1F23F, prExtendedPictographic}, // NA [4] (🈼..🈿) .. + {0x1F249, 0x1F24F, prExtendedPictographic}, // NA [7] (🉉..🉏) .. + {0x1F250, 0x1F251, prExtendedPictographic}, // 6.0 [2] (🉐..🉑) Japanese “bargain” button..Japanese “acceptable” button + {0x1F252, 0x1F25F, prExtendedPictographic}, // NA [14] (🉒..🉟) .. + {0x1F260, 0x1F265, prExtendedPictographic}, // 10.0 [6] (🉠..🉥) ROUNDED SYMBOL FOR FU..ROUNDED SYMBOL FOR CAI + {0x1F266, 0x1F2FF, prExtendedPictographic}, // NA[154] (🉦..🋿) .. 
+ {0x1F300, 0x1F320, prExtendedPictographic}, // 6.0 [33] (🌀..🌠) cyclone..shooting star + {0x1F321, 0x1F32C, prExtendedPictographic}, // 7.0 [12] (🌡️..🌬️) thermometer..wind face + {0x1F32D, 0x1F32F, prExtendedPictographic}, // 8.0 [3] (🌭..🌯) hot dog..burrito + {0x1F330, 0x1F335, prExtendedPictographic}, // 6.0 [6] (🌰..🌵) chestnut..cactus + {0x1F336, 0x1F336, prExtendedPictographic}, // 7.0 [1] (🌶️) hot pepper + {0x1F337, 0x1F37C, prExtendedPictographic}, // 6.0 [70] (🌷..🍼) tulip..baby bottle + {0x1F37D, 0x1F37D, prExtendedPictographic}, // 7.0 [1] (🍽️) fork and knife with plate + {0x1F37E, 0x1F37F, prExtendedPictographic}, // 8.0 [2] (🍾..🍿) bottle with popping cork..popcorn + {0x1F380, 0x1F393, prExtendedPictographic}, // 6.0 [20] (🎀..🎓) ribbon..graduation cap + {0x1F394, 0x1F39F, prExtendedPictographic}, // 7.0 [12] (🎔..🎟️) HEART WITH TIP ON THE LEFT..admission tickets + {0x1F3A0, 0x1F3C4, prExtendedPictographic}, // 6.0 [37] (🎠..🏄) carousel horse..person surfing + {0x1F3C5, 0x1F3C5, prExtendedPictographic}, // 7.0 [1] (🏅) sports medal + {0x1F3C6, 0x1F3CA, prExtendedPictographic}, // 6.0 [5] (🏆..🏊) trophy..person swimming + {0x1F3CB, 0x1F3CE, prExtendedPictographic}, // 7.0 [4] (🏋️..🏎️) person lifting weights..racing car + {0x1F3CF, 0x1F3D3, prExtendedPictographic}, // 8.0 [5] (🏏..🏓) cricket game..ping pong + {0x1F3D4, 0x1F3DF, prExtendedPictographic}, // 7.0 [12] (🏔️..🏟️) snow-capped mountain..stadium + {0x1F3E0, 0x1F3F0, prExtendedPictographic}, // 6.0 [17] (🏠..🏰) house..castle + {0x1F3F1, 0x1F3F7, prExtendedPictographic}, // 7.0 [7] (🏱..🏷️) WHITE PENNANT..label + {0x1F3F8, 0x1F3FA, prExtendedPictographic}, // 8.0 [3] (🏸..🏺) badminton..amphora + {0x1F3FB, 0x1F3FF, prExtend}, // Sk [5] EMOJI MODIFIER FITZPATRICK TYPE-1-2..EMOJI MODIFIER FITZPATRICK TYPE-6 + {0x1F400, 0x1F43E, prExtendedPictographic}, // 6.0 [63] (🐀..🐾) rat..paw prints + {0x1F43F, 0x1F43F, prExtendedPictographic}, // 7.0 [1] (🐿️) chipmunk + {0x1F440, 0x1F440, prExtendedPictographic}, // 6.0 [1] (👀) eyes + {0x1F441, 0x1F441, prExtendedPictographic}, // 7.0 [1] (👁️) eye + {0x1F442, 0x1F4F7, prExtendedPictographic}, // 6.0[182] (👂..📷) ear..camera + {0x1F4F8, 0x1F4F8, prExtendedPictographic}, // 7.0 [1] (📸) camera with flash + {0x1F4F9, 0x1F4FC, prExtendedPictographic}, // 6.0 [4] (📹..📼) video camera..videocassette + {0x1F4FD, 0x1F4FE, prExtendedPictographic}, // 7.0 [2] (📽️..📾) film projector..PORTABLE STEREO + {0x1F4FF, 0x1F4FF, prExtendedPictographic}, // 8.0 [1] (📿) prayer beads + {0x1F500, 0x1F53D, prExtendedPictographic}, // 6.0 [62] (🔀..🔽) shuffle tracks button..downwards button + {0x1F546, 0x1F54A, prExtendedPictographic}, // 7.0 [5] (🕆..🕊️) WHITE LATIN CROSS..dove + {0x1F54B, 0x1F54F, prExtendedPictographic}, // 8.0 [5] (🕋..🕏) kaaba..BOWL OF HYGIEIA + {0x1F550, 0x1F567, prExtendedPictographic}, // 6.0 [24] (🕐..🕧) one o’clock..twelve-thirty + {0x1F568, 0x1F579, prExtendedPictographic}, // 7.0 [18] (🕨..🕹️) RIGHT SPEAKER..joystick + {0x1F57A, 0x1F57A, prExtendedPictographic}, // 9.0 [1] (🕺) man dancing + {0x1F57B, 0x1F5A3, prExtendedPictographic}, // 7.0 [41] (🕻..🖣) LEFT HAND TELEPHONE RECEIVER..BLACK DOWN POINTING BACKHAND INDEX + {0x1F5A4, 0x1F5A4, prExtendedPictographic}, // 9.0 [1] (🖤) black heart + {0x1F5A5, 0x1F5FA, prExtendedPictographic}, // 7.0 [86] (🖥️..🗺️) desktop computer..world map + {0x1F5FB, 0x1F5FF, prExtendedPictographic}, // 6.0 [5] (🗻..🗿) mount fuji..moai + {0x1F600, 0x1F600, prExtendedPictographic}, // 6.1 [1] (😀) grinning face + {0x1F601, 0x1F610, prExtendedPictographic}, // 6.0 [16] (😁..😐) beaming 
face with smiling eyes..neutral face + {0x1F611, 0x1F611, prExtendedPictographic}, // 6.1 [1] (😑) expressionless face + {0x1F612, 0x1F614, prExtendedPictographic}, // 6.0 [3] (😒..😔) unamused face..pensive face + {0x1F615, 0x1F615, prExtendedPictographic}, // 6.1 [1] (😕) confused face + {0x1F616, 0x1F616, prExtendedPictographic}, // 6.0 [1] (😖) confounded face + {0x1F617, 0x1F617, prExtendedPictographic}, // 6.1 [1] (😗) kissing face + {0x1F618, 0x1F618, prExtendedPictographic}, // 6.0 [1] (😘) face blowing a kiss + {0x1F619, 0x1F619, prExtendedPictographic}, // 6.1 [1] (😙) kissing face with smiling eyes + {0x1F61A, 0x1F61A, prExtendedPictographic}, // 6.0 [1] (😚) kissing face with closed eyes + {0x1F61B, 0x1F61B, prExtendedPictographic}, // 6.1 [1] (😛) face with tongue + {0x1F61C, 0x1F61E, prExtendedPictographic}, // 6.0 [3] (😜..😞) winking face with tongue..disappointed face + {0x1F61F, 0x1F61F, prExtendedPictographic}, // 6.1 [1] (😟) worried face + {0x1F620, 0x1F625, prExtendedPictographic}, // 6.0 [6] (😠..😥) angry face..sad but relieved face + {0x1F626, 0x1F627, prExtendedPictographic}, // 6.1 [2] (😦..😧) frowning face with open mouth..anguished face + {0x1F628, 0x1F62B, prExtendedPictographic}, // 6.0 [4] (😨..😫) fearful face..tired face + {0x1F62C, 0x1F62C, prExtendedPictographic}, // 6.1 [1] (😬) grimacing face + {0x1F62D, 0x1F62D, prExtendedPictographic}, // 6.0 [1] (😭) loudly crying face + {0x1F62E, 0x1F62F, prExtendedPictographic}, // 6.1 [2] (😮..😯) face with open mouth..hushed face + {0x1F630, 0x1F633, prExtendedPictographic}, // 6.0 [4] (😰..😳) anxious face with sweat..flushed face + {0x1F634, 0x1F634, prExtendedPictographic}, // 6.1 [1] (😴) sleeping face + {0x1F635, 0x1F640, prExtendedPictographic}, // 6.0 [12] (😵..🙀) dizzy face..weary cat + {0x1F641, 0x1F642, prExtendedPictographic}, // 7.0 [2] (🙁..🙂) slightly frowning face..slightly smiling face + {0x1F643, 0x1F644, prExtendedPictographic}, // 8.0 [2] (🙃..🙄) upside-down face..face with rolling eyes + {0x1F645, 0x1F64F, prExtendedPictographic}, // 6.0 [11] (🙅..🙏) person gesturing NO..folded hands + {0x1F680, 0x1F6C5, prExtendedPictographic}, // 6.0 [70] (🚀..🛅) rocket..left luggage + {0x1F6C6, 0x1F6CF, prExtendedPictographic}, // 7.0 [10] (🛆..🛏️) TRIANGLE WITH ROUNDED CORNERS..bed + {0x1F6D0, 0x1F6D0, prExtendedPictographic}, // 8.0 [1] (🛐) place of worship + {0x1F6D1, 0x1F6D2, prExtendedPictographic}, // 9.0 [2] (🛑..🛒) stop sign..shopping cart + {0x1F6D3, 0x1F6D4, prExtendedPictographic}, // 10.0 [2] (🛓..🛔) STUPA..PAGODA + {0x1F6D5, 0x1F6D5, prExtendedPictographic}, // 12.0 [1] (🛕) hindu temple + {0x1F6D6, 0x1F6DF, prExtendedPictographic}, // NA [10] (🛖..🛟) .. + {0x1F6E0, 0x1F6EC, prExtendedPictographic}, // 7.0 [13] (🛠️..🛬) hammer and wrench..airplane arrival + {0x1F6ED, 0x1F6EF, prExtendedPictographic}, // NA [3] (🛭..🛯) .. + {0x1F6F0, 0x1F6F3, prExtendedPictographic}, // 7.0 [4] (🛰️..🛳️) satellite..passenger ship + {0x1F6F4, 0x1F6F6, prExtendedPictographic}, // 9.0 [3] (🛴..🛶) kick scooter..canoe + {0x1F6F7, 0x1F6F8, prExtendedPictographic}, // 10.0 [2] (🛷..🛸) sled..flying saucer + {0x1F6F9, 0x1F6F9, prExtendedPictographic}, // 11.0 [1] (🛹) skateboard + {0x1F6FA, 0x1F6FA, prExtendedPictographic}, // 12.0 [1] (🛺) auto rickshaw + {0x1F6FB, 0x1F6FF, prExtendedPictographic}, // NA [5] (🛻..🛿) .. + {0x1F774, 0x1F77F, prExtendedPictographic}, // NA [12] (🝴..🝿) .. + {0x1F7D5, 0x1F7D8, prExtendedPictographic}, // 11.0 [4] (🟕..🟘) CIRCLED TRIANGLE..NEGATIVE CIRCLED SQUARE + {0x1F7D9, 0x1F7DF, prExtendedPictographic}, // NA [7] (🟙..🟟) .. 
+ {0x1F7E0, 0x1F7EB, prExtendedPictographic}, // 12.0 [12] (🟠..🟫) orange circle..brown square + {0x1F7EC, 0x1F7FF, prExtendedPictographic}, // NA [20] (🟬..🟿) .. + {0x1F80C, 0x1F80F, prExtendedPictographic}, // NA [4] (🠌..🠏) .. + {0x1F848, 0x1F84F, prExtendedPictographic}, // NA [8] (🡈..🡏) .. + {0x1F85A, 0x1F85F, prExtendedPictographic}, // NA [6] (🡚..🡟) .. + {0x1F888, 0x1F88F, prExtendedPictographic}, // NA [8] (🢈..🢏) .. + {0x1F8AE, 0x1F8FF, prExtendedPictographic}, // NA [82] (🢮..🣿) .. + {0x1F90C, 0x1F90C, prExtendedPictographic}, // NA [1] (🤌) + {0x1F90D, 0x1F90F, prExtendedPictographic}, // 12.0 [3] (🤍..🤏) white heart..pinching hand + {0x1F910, 0x1F918, prExtendedPictographic}, // 8.0 [9] (🤐..🤘) zipper-mouth face..sign of the horns + {0x1F919, 0x1F91E, prExtendedPictographic}, // 9.0 [6] (🤙..🤞) call me hand..crossed fingers + {0x1F91F, 0x1F91F, prExtendedPictographic}, // 10.0 [1] (🤟) love-you gesture + {0x1F920, 0x1F927, prExtendedPictographic}, // 9.0 [8] (🤠..🤧) cowboy hat face..sneezing face + {0x1F928, 0x1F92F, prExtendedPictographic}, // 10.0 [8] (🤨..🤯) face with raised eyebrow..exploding head + {0x1F930, 0x1F930, prExtendedPictographic}, // 9.0 [1] (🤰) pregnant woman + {0x1F931, 0x1F932, prExtendedPictographic}, // 10.0 [2] (🤱..🤲) breast-feeding..palms up together + {0x1F933, 0x1F93A, prExtendedPictographic}, // 9.0 [8] (🤳..🤺) selfie..person fencing + {0x1F93C, 0x1F93E, prExtendedPictographic}, // 9.0 [3] (🤼..🤾) people wrestling..person playing handball + {0x1F93F, 0x1F93F, prExtendedPictographic}, // 12.0 [1] (🤿) diving mask + {0x1F940, 0x1F945, prExtendedPictographic}, // 9.0 [6] (🥀..🥅) wilted flower..goal net + {0x1F947, 0x1F94B, prExtendedPictographic}, // 9.0 [5] (🥇..🥋) 1st place medal..martial arts uniform + {0x1F94C, 0x1F94C, prExtendedPictographic}, // 10.0 [1] (🥌) curling stone + {0x1F94D, 0x1F94F, prExtendedPictographic}, // 11.0 [3] (🥍..🥏) lacrosse..flying disc + {0x1F950, 0x1F95E, prExtendedPictographic}, // 9.0 [15] (🥐..🥞) croissant..pancakes + {0x1F95F, 0x1F96B, prExtendedPictographic}, // 10.0 [13] (🥟..🥫) dumpling..canned food + {0x1F96C, 0x1F970, prExtendedPictographic}, // 11.0 [5] (🥬..🥰) leafy green..smiling face with hearts + {0x1F971, 0x1F971, prExtendedPictographic}, // 12.0 [1] (🥱) yawning face + {0x1F972, 0x1F972, prExtendedPictographic}, // NA [1] (🥲) + {0x1F973, 0x1F976, prExtendedPictographic}, // 11.0 [4] (🥳..🥶) partying face..cold face + {0x1F977, 0x1F979, prExtendedPictographic}, // NA [3] (🥷..🥹) .. + {0x1F97A, 0x1F97A, prExtendedPictographic}, // 11.0 [1] (🥺) pleading face + {0x1F97B, 0x1F97B, prExtendedPictographic}, // 12.0 [1] (🥻) sari + {0x1F97C, 0x1F97F, prExtendedPictographic}, // 11.0 [4] (🥼..🥿) lab coat..flat shoe + {0x1F980, 0x1F984, prExtendedPictographic}, // 8.0 [5] (🦀..🦄) crab..unicorn + {0x1F985, 0x1F991, prExtendedPictographic}, // 9.0 [13] (🦅..🦑) eagle..squid + {0x1F992, 0x1F997, prExtendedPictographic}, // 10.0 [6] (🦒..🦗) giraffe..cricket + {0x1F998, 0x1F9A2, prExtendedPictographic}, // 11.0 [11] (🦘..🦢) kangaroo..swan + {0x1F9A3, 0x1F9A4, prExtendedPictographic}, // NA [2] (🦣..🦤) .. + {0x1F9A5, 0x1F9AA, prExtendedPictographic}, // 12.0 [6] (🦥..🦪) sloth..oyster + {0x1F9AB, 0x1F9AD, prExtendedPictographic}, // NA [3] (🦫..🦭) .. 
+ {0x1F9AE, 0x1F9AF, prExtendedPictographic}, // 12.0 [2] (🦮..🦯) guide dog..probing cane + {0x1F9B0, 0x1F9B9, prExtendedPictographic}, // 11.0 [10] (🦰..🦹) red hair..supervillain + {0x1F9BA, 0x1F9BF, prExtendedPictographic}, // 12.0 [6] (🦺..🦿) safety vest..mechanical leg + {0x1F9C0, 0x1F9C0, prExtendedPictographic}, // 8.0 [1] (🧀) cheese wedge + {0x1F9C1, 0x1F9C2, prExtendedPictographic}, // 11.0 [2] (🧁..🧂) cupcake..salt + {0x1F9C3, 0x1F9CA, prExtendedPictographic}, // 12.0 [8] (🧃..🧊) beverage box..ice cube + {0x1F9CB, 0x1F9CC, prExtendedPictographic}, // NA [2] (🧋..🧌) .. + {0x1F9CD, 0x1F9CF, prExtendedPictographic}, // 12.0 [3] (🧍..🧏) person standing..deaf person + {0x1F9D0, 0x1F9E6, prExtendedPictographic}, // 10.0 [23] (🧐..🧦) face with monocle..socks + {0x1F9E7, 0x1F9FF, prExtendedPictographic}, // 11.0 [25] (🧧..🧿) red envelope..nazar amulet + {0x1FA00, 0x1FA53, prExtendedPictographic}, // 12.0 [84] (🨀..🩓) NEUTRAL CHESS KING..BLACK CHESS KNIGHT-BISHOP + {0x1FA54, 0x1FA5F, prExtendedPictographic}, // NA [12] (🩔..🩟) .. + {0x1FA60, 0x1FA6D, prExtendedPictographic}, // 11.0 [14] (🩠..🩭) XIANGQI RED GENERAL..XIANGQI BLACK SOLDIER + {0x1FA6E, 0x1FA6F, prExtendedPictographic}, // NA [2] (🩮..🩯) .. + {0x1FA70, 0x1FA73, prExtendedPictographic}, // 12.0 [4] (🩰..🩳) ballet shoes..shorts + {0x1FA74, 0x1FA77, prExtendedPictographic}, // NA [4] (🩴..🩷) .. + {0x1FA78, 0x1FA7A, prExtendedPictographic}, // 12.0 [3] (🩸..🩺) drop of blood..stethoscope + {0x1FA7B, 0x1FA7F, prExtendedPictographic}, // NA [5] (🩻..🩿) .. + {0x1FA80, 0x1FA82, prExtendedPictographic}, // 12.0 [3] (🪀..🪂) yo-yo..parachute + {0x1FA83, 0x1FA8F, prExtendedPictographic}, // NA [13] (🪃..🪏) .. + {0x1FA90, 0x1FA95, prExtendedPictographic}, // 12.0 [6] (🪐..🪕) ringed planet..banjo + {0x1FA96, 0x1FFFD, prExtendedPictographic}, // NA[1384] (🪖..🿽) .. + {0xE0000, 0xE0000, prControl}, // Cn + {0xE0001, 0xE0001, prControl}, // Cf LANGUAGE TAG + {0xE0002, 0xE001F, prControl}, // Cn [30] .. + {0xE0020, 0xE007F, prExtend}, // Cf [96] TAG SPACE..CANCEL TAG + {0xE0080, 0xE00FF, prControl}, // Cn [128] .. + {0xE0100, 0xE01EF, prExtend}, // Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256 + {0xE01F0, 0xE0FFF, prControl}, // Cn [3600] .. +} + +// property returns the Unicode property value (see constants above) of the +// given code point. +func property(r rune) int { + // Run a binary search. + from := 0 + to := len(codePoints) + for to > from { + middle := (from + to) / 2 + cpRange := codePoints[middle] + if int(r) < cpRange[0] { + to = middle + continue + } + if int(r) > cpRange[1] { + from = middle + 1 + continue + } + return cpRange[2] + } + return prAny +} diff --git a/vendor/github.com/russross/blackfriday/v2/.gitignore b/vendor/github.com/russross/blackfriday/v2/.gitignore new file mode 100644 index 000000000..75623dccc --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/.gitignore @@ -0,0 +1,8 @@ +*.out +*.swp +*.8 +*.6 +_obj +_test* +markdown +tags diff --git a/vendor/github.com/russross/blackfriday/v2/.travis.yml b/vendor/github.com/russross/blackfriday/v2/.travis.yml new file mode 100644 index 000000000..b0b525a5a --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/.travis.yml @@ -0,0 +1,17 @@ +sudo: false +language: go +go: + - "1.10.x" + - "1.11.x" + - tip +matrix: + fast_finish: true + allow_failures: + - go: tip +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... 
+ - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v ./... diff --git a/vendor/github.com/russross/blackfriday/v2/LICENSE.txt b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt new file mode 100644 index 000000000..2885af360 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt @@ -0,0 +1,29 @@ +Blackfriday is distributed under the Simplified BSD License: + +> Copyright © 2011 Russ Ross +> All rights reserved. +> +> Redistribution and use in source and binary forms, with or without +> modification, are permitted provided that the following conditions +> are met: +> +> 1. Redistributions of source code must retain the above copyright +> notice, this list of conditions and the following disclaimer. +> +> 2. Redistributions in binary form must reproduce the above +> copyright notice, this list of conditions and the following +> disclaimer in the documentation and/or other materials provided with +> the distribution. +> +> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +> POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/russross/blackfriday/v2/README.md b/vendor/github.com/russross/blackfriday/v2/README.md new file mode 100644 index 000000000..d9c08a22f --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/README.md @@ -0,0 +1,335 @@ +Blackfriday +[![Build Status][BuildV2SVG]][BuildV2URL] +[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL] +=========== + +Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It +is paranoid about its input (so you can safely feed it user-supplied +data), it is fast, it supports common extensions (tables, smart +punctuation substitutions, etc.), and it is safe for all utf-8 +(unicode) input. + +HTML output is currently supported, along with Smartypants +extensions. + +It started as a translation from C of [Sundown][3]. + + +Installation +------------ + +Blackfriday is compatible with modern Go releases in module mode. +With Go installed: + + go get github.com/russross/blackfriday/v2 + +will resolve and add the package to the current development module, +then build and install it. Alternatively, you can achieve the same +if you import it in a package: + + import "github.com/russross/blackfriday/v2" + +and `go get` without parameters. + +Legacy GOPATH mode is unsupported. + + +Versions +-------- + +Currently maintained and recommended version of Blackfriday is `v2`. It's being +developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the +documentation is available at +https://pkg.go.dev/github.com/russross/blackfriday/v2. + +It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`. 
+
+Version 2 offers a number of improvements over v1:
+
+* Cleaned up API
+* A separate call to [`Parse`][4], which produces an abstract syntax tree for
+  the document
+* Latest bug fixes
+* Flexibility to easily add your own rendering extensions
+
+Potential drawbacks:
+
+* Our benchmarks show v2 to be slightly slower than v1. Currently in the
+  ballpark of around 15%.
+* API breakage. If you can't afford to modify your code to adhere to the new API
+  and don't care too much about the new features, v2 is probably not for you.
+* Several bug fixes are trailing behind and still need to be forward-ported to
+  v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for
+  tracking.
+
+If you are still interested in the legacy `v1`, you can import it from
+`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found
+here: https://pkg.go.dev/github.com/russross/blackfriday.
+
+
+Usage
+-----
+
+For the most sensible markdown processing, it is as simple as getting your input
+into a byte slice and calling:
+
+```go
+output := blackfriday.Run(input)
+```
+
+Your input will be parsed and the output rendered with a set of most popular
+extensions enabled. If you want the most basic feature set, corresponding with
+the bare Markdown specification, use:
+
+```go
+output := blackfriday.Run(input, blackfriday.WithNoExtensions())
+```
+
+### Sanitize untrusted content
+
+Blackfriday itself does nothing to protect against malicious content. If you are
+dealing with user-supplied markdown, we recommend running Blackfriday's output
+through an HTML sanitizer such as [Bluemonday][5].
+
+Here's an example of simple usage of Blackfriday together with Bluemonday:
+
+```go
+import (
+    "github.com/microcosm-cc/bluemonday"
+    "github.com/russross/blackfriday/v2"
+)
+
+// ...
+unsafe := blackfriday.Run(input)
+html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
+```
+
+### Custom options
+
+If you want to customize the set of options, use `blackfriday.WithExtensions`,
+`blackfriday.WithRenderer` and `blackfriday.WithRefOverride` (a short sketch
+combining these options appears after the TODO section below).
+
+### `blackfriday-tool`
+
+You can also check out `blackfriday-tool` for a more complete example
+of how to use it. Download and install it using:
+
+    go get github.com/russross/blackfriday-tool
+
+This is a simple command-line tool that allows you to process a
+markdown file using a standalone program. You can also browse the
+source directly on github if you are just looking for some example
+code:
+
+* https://github.com/russross/blackfriday-tool
+
+Note that if you have not already done so, installing
+`blackfriday-tool` will be sufficient to download and install
+blackfriday in addition to the tool itself. The tool binary will be
+installed in `$GOPATH/bin`. This is a statically-linked binary that
+can be copied to wherever you need it without worrying about
+dependencies and library versions.
+
+### Sanitized anchor names
+
+Blackfriday includes an algorithm for creating sanitized anchor names
+corresponding to a given input text. This algorithm is used to create
+anchors for headings when the `AutoHeadingIDs` extension is enabled. The
+algorithm has a specification, so that other packages can create
+compatible anchor names and links to those anchors.
+
+The specification is located at https://pkg.go.dev/github.com/russross/blackfriday/v2#hdr-Sanitized_Anchor_Names.
+
+[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday/v2#SanitizedAnchorName) exposes this functionality, and can be used to
+create compatible links to the anchor names generated by blackfriday.
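As an aside (not part of the vendored README), here is a minimal, self-contained sketch of calling `SanitizedAnchorName`; the sample inputs and the expected outputs in the comments are assumptions derived from the algorithm described above:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Letters and numbers are lowercased and kept; every other run of
	// characters between them collapses to a single dash, and leading
	// or trailing invalid characters are dropped entirely.
	fmt.Println(blackfriday.SanitizedAnchorName("This is a header"))  // this-is-a-header
	fmt.Println(blackfriday.SanitizedAnchorName("!! Heading 1.2 !!")) // heading-1-2
}
```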
+This algorithm is also implemented in a small standalone package at
+[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients
+that want a small package and don't need full functionality of blackfriday.
+
+
+Features
+--------
+
+All features of Sundown are supported, including:
+
+* **Compatibility**. The Markdown v1.0.3 test suite passes with
+  the `--tidy` option. Without `--tidy`, the differences are
+  mostly in whitespace and entity escaping, where blackfriday is
+  more consistent and cleaner.
+
+* **Common extensions**, including table support, fenced code
+  blocks, autolinks, strikethroughs, non-strict emphasis, etc.
+
+* **Safety**. Blackfriday is paranoid when parsing, making it safe
+  to feed untrusted user input without fear of bad things
+  happening. The test suite stress tests this and there are no
+  known inputs that make it crash. If you find one, please let me
+  know and send me the input that does it.
+
+  NOTE: "safety" in this context means *runtime safety only*. In order to
+  protect yourself against JavaScript injection in untrusted content, see
+  [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
+
+* **Fast processing**. It is fast enough to render on-demand in
+  most web applications without having to cache the output.
+
+* **Thread safety**. You can run multiple parsers in different
+  goroutines without ill effect. There is no dependence on global
+  shared state.
+
+* **Minimal dependencies**. Blackfriday only depends on standard
+  library packages in Go. The source code is pretty
+  self-contained, so it is easy to add to any project, including
+  Google App Engine projects.
+
+* **Standards compliant**. Output successfully validates using the
+  W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
+
+
+Extensions
+----------
+
+In addition to the standard markdown syntax, this package
+implements the following extensions:
+
+* **Intra-word emphasis suppression**. The `_` character is
+  commonly used inside words when discussing code, so having
+  markdown interpret it as an emphasis command is usually the
+  wrong thing. Blackfriday lets you treat all emphasis markers as
+  normal characters when they occur inside a word.
+
+* **Tables**. Tables can be created by drawing them in the input
+  using a simple syntax:
+
+  ```
+  Name    | Age
+  --------|------
+  Bob     | 27
+  Alice   | 23
+  ```
+
+* **Fenced code blocks**. In addition to the normal 4-space
+  indentation to mark code blocks, you can explicitly mark them
+  and supply a language (to make syntax highlighting simple). Just
+  mark it like this:
+
+  ```go
+  func getTrue() bool {
+      return true
+  }
+  ```
+
+  You can use 3 or more backticks to mark the beginning of the
+  block, and the same number to mark the end of the block.
+
+  To preserve classes of fenced code blocks while using the bluemonday
+  HTML sanitizer, use the following policy:
+
+  ```go
+  p := bluemonday.UGCPolicy()
+  p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code")
+  html := p.SanitizeBytes(unsafe)
+  ```
+
+* **Definition lists**. A simple definition list is made of a single-line
+  term followed by a colon and the definition for that term.
+
+      Cat
+      : Fluffy animal everyone likes
+
+      Internet
+      : Vector of transmission for pictures of cats
+
+  Terms must be separated from the previous definition by a blank line.
+
+* **Footnotes**.
A marker in the text that will become a superscript number;
+  a footnote definition that will be placed in a list of footnotes at the
+  end of the document. A footnote looks like this:
+
+      This is a footnote.[^1]
+
+      [^1]: the footnote text.
+
+* **Autolinking**. Blackfriday can find URLs that have not been
+  explicitly marked as links and turn them into links.
+
+* **Strikethrough**. Use two tildes (`~~`) to mark text that
+  should be crossed out.
+
+* **Hard line breaks**. With this extension enabled, newlines in the input
+  translate into line breaks in the output. This extension is off by default.
+
+* **Smart quotes**. Smartypants-style punctuation substitution is
+  supported, turning normal double- and single-quote marks into
+  curly quotes, etc.
+
+* **LaTeX-style dash parsing** is an additional option, where `--`
+  is translated into `&ndash;`, and `---` is translated into
+  `&mdash;`. This differs from most smartypants processors, which
+  turn a single hyphen into an ndash and a double hyphen into an
+  mdash.
+
+* **Smart fractions**, where anything that looks like a fraction
+  is translated into suitable HTML (instead of just a few special
+  cases like most smartypants processors). For example, `4/5`
+  becomes `<sup>4</sup>&frasl;<sub>5</sub>`, which renders as a
+  properly typeset 4⁄5.
+
+
+Other renderers
+---------------
+
+Blackfriday is structured to allow alternative rendering engines. Here
+are a few of note:
+
+* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown):
+  provides a GitHub Flavored Markdown renderer with fenced code block
+  highlighting and clickable heading anchor links.
+
+  It's not customizable, and its goal is to produce HTML output
+  equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
+  except the rendering is performed locally.
+
+* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
+  but for markdown.
+
+* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex):
+  renders output as LaTeX.
+
+* [bfchroma](https://github.com/Depado/bfchroma/): provides convenient
+  integration with the [Chroma](https://github.com/alecthomas/chroma) code
+  highlighting library. bfchroma is only compatible with v2 of Blackfriday and
+  provides a drop-in renderer ready to use with Blackfriday, as well as
+  options and means for further customization.
+
+* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer.
+
+* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to Slack message style.
+
+
+TODO
+----
+
+* More unit testing
+* Improve Unicode support. It does not understand all Unicode
+  rules (about what constitutes a letter, a punctuation symbol,
+  etc.), so it may fail to detect word boundaries correctly in
+  some instances. It is safe on all UTF-8 input.
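The "Custom options" section above names `blackfriday.WithExtensions`, `blackfriday.WithRenderer` and `blackfriday.WithRefOverride` without showing them in use. Here is a minimal, self-contained sketch (an editorial aside, not part of the vendored README) combining the first two; the particular extension and HTML-flag choices are illustrative assumptions, not recommendations:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	input := []byte("first line\nsecond line\n")

	// Render a complete HTML page and treat single newlines as hard breaks.
	renderer := blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
		Flags: blackfriday.CommonHTMLFlags | blackfriday.CompletePage,
	})
	output := blackfriday.Run(input,
		blackfriday.WithExtensions(blackfriday.CommonExtensions|blackfriday.HardLineBreak),
		blackfriday.WithRenderer(renderer),
	)
	fmt.Println(string(output))
}
```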
+
+
+License
+-------
+
+[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt)
+
+
+   [1]: https://daringfireball.net/projects/markdown/ "Markdown"
+   [2]: https://golang.org/ "Go Language"
+   [3]: https://github.com/vmg/sundown "Sundown"
+   [4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func"
+   [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday"
+
+   [BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2
+   [BuildV2URL]: https://travis-ci.org/russross/blackfriday
+   [PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2
+   [PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2
diff --git a/vendor/github.com/russross/blackfriday/v2/block.go b/vendor/github.com/russross/blackfriday/v2/block.go
new file mode 100644
index 000000000..dcd61e6e3
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/block.go
@@ -0,0 +1,1612 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+// Functions to parse block-level elements.
+//
+
+package blackfriday
+
+import (
+	"bytes"
+	"html"
+	"regexp"
+	"strings"
+	"unicode"
+)
+
+const (
+	charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});"
+	escapable  = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]"
+)
+
+var (
+	reBackslashOrAmp      = regexp.MustCompile("[\\&]")
+	reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity)
+)
+
+// Parse block-level data.
+// Note: this function and many that it calls assume that
+// the input buffer ends with a newline.
+func (p *Markdown) block(data []byte) {
+	// this is called recursively: enforce a maximum depth
+	if p.nesting >= p.maxNesting {
+		return
+	}
+	p.nesting++
+
+	// parse out one block-level construct at a time
+	for len(data) > 0 {
+		// prefixed heading:
+		//
+		// # Heading 1
+		// ## Heading 2
+		// ...
+		// ###### Heading 6
+		if p.isPrefixHeading(data) {
+			data = data[p.prefixHeading(data):]
+			continue
+		}
+
+		// block of preformatted HTML:
+		//
+		//     <div>
+		//         ...
+		//     </div>
+ if data[0] == '<' { + if i := p.html(data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.extensions&Titleblock != 0 { + if data[0] == '%' { + if i := p.titleBlock(data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(data):] + continue + } + + // fenced code block: + // + // ``` go + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.extensions&FencedCode != 0 { + if i := p.fencedCodeBlock(data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.addBlock(HorizontalRule, nil) + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + } + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(data):] + continue + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.extensions&Tables != 0 { + if i := p.table(data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(data, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. Item 2 + if p.oliPrefix(data) > 0 { + data = data[p.list(data, ListTypeOrdered):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.extensions&DefinitionLists != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(data, ListTypeDefinition):] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headings, too + data = data[p.paragraph(data):] + } + + p.nesting-- +} + +func (p *Markdown) addBlock(typ NodeType, content []byte) *Node { + p.closeUnmatchedBlocks() + container := p.addChild(typ, 0) + container.content = content + return container +} + +func (p *Markdown) isPrefixHeading(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.extensions&SpaceHeadings != 0 { + level := 0 + for level < 6 && level < len(data) && data[level] == '#' { + level++ + } + if level == len(data) || data[level] != ' ' { + return false + } + } + return true +} + +func (p *Markdown) prefixHeading(data []byte) int { + level := 0 + for level < 6 && level < len(data) && data[level] == '#' { + level++ + } + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.extensions&HeadingIDs != 0 { + j, k := 0, 0 + // find start/end of heading id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract heading id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + 
} + if end > i { + if id == "" && p.extensions&AutoHeadingIDs != 0 { + id = SanitizedAnchorName(string(data[i:end])) + } + block := p.addBlock(Heading, data[i:end]) + block.HeadingID = id + block.Level = level + } + return skip +} + +func (p *Markdown) isUnderlinedHeading(data []byte) int { + // test of level 1 heading + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = skipChar(data, i, ' ') + if i < len(data) && data[i] == '\n' { + return 1 + } + return 0 + } + + // test of level 2 heading + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if i < len(data) && data[i] == '\n' { + return 2 + } + return 0 + } + + return 0 +} + +func (p *Markdown) titleBlock(data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + consumed := len(data) + data = bytes.TrimPrefix(data, []byte("% ")) + data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1) + block := p.addBlock(Heading, data) + block.Level = 1 + block.IsTitleblock = true + + return consumed +} + +func (p *Markdown) html(data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(data, doRender); size > 0 { + return size + } + + // check for an
<hr> tag
+		if size := p.htmlHr(data, doRender); size > 0 {
+			return size
+		}
+
+		// no special case recognized
+		return 0
+	}
+
+	// look for an unindented matching closing tag
+	// followed by a blank line
+	found := false
+	/*
+		closetag := []byte("\n</" + curtag + ">")
+		j = len(curtag) + 1
+		for !found {
+			// scan for a closing tag at the beginning of a line
+			if skip := bytes.Index(data[j:], closetag); skip >= 0 {
+				j += skip + len(closetag)
+			} else {
+				break
+			}
+
+			// see if it is the only thing on the line
+			if skip := p.isEmpty(data[j:]); skip > 0 {
+				// see if it is followed by a blank line/eof
+				j += skip
+				if j >= len(data) {
+					found = true
+					i = j
+				} else {
+					if skip := p.isEmpty(data[j:]); skip > 0 {
+						j += skip
+						found = true
+						i = j
+					}
+				}
+			}
+		}
+	*/
+
+	// if not found, try a second pass looking for indented match
+	// but not if tag is "ins" or "del" (following original Markdown.pl)
+	if !found && curtag != "ins" && curtag != "del" {
+		i = 1
+		for i < len(data) {
+			i++
+			for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
+				i++
+			}
+
+			if i+2+len(curtag) >= len(data) {
+				break
+			}
+
+			j = p.htmlFindEnd(curtag, data[i-1:])
+
+			if j > 0 {
+				i += j - 1
+				found = true
+				break
+			}
+		}
+	}
+
+	if !found {
+		return 0
+	}
+
+	// the end of the block has been found
+	if doRender {
+		// trim newlines
+		end := i
+		for end > 0 && data[end-1] == '\n' {
+			end--
+		}
+		finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end]))
+	}
+
+	return i
+}
+
+func finalizeHTMLBlock(block *Node) {
+	block.Literal = block.content
+	block.content = nil
+}
+
+// HTML comment, lax form
+func (p *Markdown) htmlComment(data []byte, doRender bool) int {
+	i := p.inlineHTMLComment(data)
+	// needs to end with a blank line
+	if j := p.isEmpty(data[i:]); j > 0 {
+		size := i + j
+		if doRender {
+			// trim trailing newlines
+			end := size
+			for end > 0 && data[end-1] == '\n' {
+				end--
+			}
+			block := p.addBlock(HTMLBlock, data[:end])
+			finalizeHTMLBlock(block)
+		}
+		return size
+	}
+	return 0
+}
+
+// HR, which is the only self-closing block tag considered
+func (p *Markdown) htmlHr(data []byte, doRender bool) int {
+	if len(data) < 4 {
+		return 0
+	}
+	if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
+		return 0
+	}
+	if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
+		// not an <hr>
tag after all; at least not a valid one
+		return 0
+	}
+	i := 3
+	for i < len(data) && data[i] != '>' && data[i] != '\n' {
+		i++
+	}
+	if i < len(data) && data[i] == '>' {
+		i++
+		if j := p.isEmpty(data[i:]); j > 0 {
+			size := i + j
+			if doRender {
+				// trim newlines
+				end := size
+				for end > 0 && data[end-1] == '\n' {
+					end--
+				}
+				finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end]))
+			}
+			return size
+		}
+	}
+	return 0
+}
+
+func (p *Markdown) htmlFindTag(data []byte) (string, bool) {
+	i := 0
+	for i < len(data) && isalnum(data[i]) {
+		i++
+	}
+	key := string(data[:i])
+	if _, ok := blockTags[key]; ok {
+		return key, true
+	}
+	return "", false
+}
+
+func (p *Markdown) htmlFindEnd(tag string, data []byte) int {
+	// assume data[0] == '<' && data[1] == '/' already tested
+	if tag == "hr" {
+		return 2
+	}
+	// check if tag is a match
+	closetag := []byte("</" + tag + ">")
+	if !bytes.HasPrefix(data, closetag) {
+		return 0
+	}
+	i := len(closetag)
+
+	// check that the rest of the line is blank
+	skip := 0
+	if skip = p.isEmpty(data[i:]); skip == 0 {
+		return 0
+	}
+	i += skip
+	skip = 0
+
+	if i >= len(data) {
+		return i
+	}
+
+	if p.extensions&LaxHTMLBlocks != 0 {
+		return i
+	}
+	if skip = p.isEmpty(data[i:]); skip == 0 {
+		// following line must be blank
+		return 0
+	}
+
+	return i + skip
+}
+
+func (*Markdown) isEmpty(data []byte) int {
+	// it is okay to call isEmpty on an empty buffer
+	if len(data) == 0 {
+		return 0
+	}
+
+	var i int
+	for i = 0; i < len(data) && data[i] != '\n'; i++ {
+		if data[i] != ' ' && data[i] != '\t' {
+			return 0
+		}
+	}
+	if i < len(data) && data[i] == '\n' {
+		i++
+	}
+	return i
+}
+
+func (*Markdown) isHRule(data []byte) bool {
+	i := 0
+
+	// skip up to three spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// look at the hrule char
+	if data[i] != '*' && data[i] != '-' && data[i] != '_' {
+		return false
+	}
+	c := data[i]
+
+	// the whole line must be the char or whitespace
+	n := 0
+	for i < len(data) && data[i] != '\n' {
+		switch {
+		case data[i] == c:
+			n++
+		case data[i] != ' ':
+			return false
+		}
+		i++
+	}
+
+	return n >= 3
+}
+
+// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
+// and returns the end index if so, or 0 otherwise. It also returns the marker found.
+// If info is not nil, it gets set to the syntax specified in the fence line.
+func isFenceLine(data []byte, info *string, oldmarker string) (end int, marker string) {
+	i, size := 0, 0
+
+	// skip up to three spaces
+	for i < len(data) && i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// check for the marker characters: ~ or `
+	if i >= len(data) {
+		return 0, ""
+	}
+	if data[i] != '~' && data[i] != '`' {
+		return 0, ""
+	}
+
+	c := data[i]
+
+	// the whole line must be the same char or whitespace
+	for i < len(data) && data[i] == c {
+		size++
+		i++
+	}
+
+	// the marker char must occur at least 3 times
+	if size < 3 {
+		return 0, ""
+	}
+	marker = string(data[i-size : i])
+
+	// if this is the end marker, it must match the beginning marker
+	if oldmarker != "" && marker != oldmarker {
+		return 0, ""
+	}
+
+	// TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
+	// into one, always get the info string, and discard it if the caller doesn't care.
+ if info != nil { + infoLength := 0 + i = skipChar(data, i, ' ') + + if i >= len(data) { + if i == len(data) { + return i, marker + } + return 0, "" + } + + infoStart := i + + if data[i] == '{' { + i++ + infoStart++ + + for i < len(data) && data[i] != '}' && data[i] != '\n' { + infoLength++ + i++ + } + + if i >= len(data) || data[i] != '}' { + return 0, "" + } + + // strip all whitespace at the beginning and the end + // of the {} block + for infoLength > 0 && isspace(data[infoStart]) { + infoStart++ + infoLength-- + } + + for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { + infoLength-- + } + i++ + i = skipChar(data, i, ' ') + } else { + for i < len(data) && !isverticalspace(data[i]) { + infoLength++ + i++ + } + } + + *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) + } + + if i == len(data) { + return i, marker + } + if i > len(data) || data[i] != '\n' { + return 0, "" + } + return i + 1, marker // Take newline into account. +} + +// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, +// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. +// If doRender is true, a final newline is mandatory to recognize the fenced code block. +func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int { + var info string + beg, marker := isFenceLine(data, &info, "") + if beg == 0 || beg >= len(data) { + return 0 + } + fenceLength := beg - 1 + + var work bytes.Buffer + work.Write([]byte(info)) + work.WriteByte('\n') + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + fenceEnd, _ := isFenceLine(data[beg:], nil, marker) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? 
+ if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + if doRender { + block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer + block.IsFenced = true + block.FenceLength = fenceLength + finalizeCodeBlock(block) + } + + return beg +} + +func unescapeChar(str []byte) []byte { + if str[0] == '\\' { + return []byte{str[1]} + } + return []byte(html.UnescapeString(string(str))) +} + +func unescapeString(str []byte) []byte { + if reBackslashOrAmp.Match(str) { + return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar) + } + return str +} + +func finalizeCodeBlock(block *Node) { + if block.IsFenced { + newlinePos := bytes.IndexByte(block.content, '\n') + firstLine := block.content[:newlinePos] + rest := block.content[newlinePos+1:] + block.Info = unescapeString(bytes.Trim(firstLine, "\n")) + block.Literal = rest + } else { + block.Literal = block.content + } + block.content = nil +} + +func (p *Markdown) table(data []byte) int { + table := p.addBlock(Table, nil) + i, columns := p.tableHeader(data) + if i == 0 { + p.tip = table.Parent + table.Unlink() + return 0 + } + + p.addBlock(TableBody, nil) + + for i < len(data) { + pipes, rowStart := 0, i + for ; i < len(data) && data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + if i < len(data) && data[i] == '\n' { + i++ + } + p.tableRow(data[rowStart:i], columns, false) + } + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) { + i := 0 + colCount := 1 + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + j := i + if j < len(data) && data[j] == '\n' { + j++ + } + header := data[:j] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]CellAlignFlags, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + for i < len(data) && data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= TableAlignmentLeft + dashes++ + } + for i < len(data) && data[i] == '-' { + i++ + dashes++ + } + if i < len(data) && data[i] == ':' { + i++ + columns[col] |= TableAlignmentRight + dashes++ + } + for i < len(data) && data[i] == ' ' { + i++ + } + if i == len(data) { + return + } + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for i < len(data) && data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && i < len(data) && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + p.addBlock(TableHead, nil) + p.tableRow(header, columns, true) + size = i + if size < len(data) && data[size] == '\n' { + size++ + } + return +} + +func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) { + p.addBlock(TableRow, nil) + i, col := 0, 0 + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + for col = 0; col < len(columns) && i < len(data); col++ { + for i < len(data) && data[i] == ' ' { + i++ + } + + cellStart := i + + for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' { + cellEnd-- + } + + cell := p.addBlock(TableCell, data[cellStart:cellEnd]) + cell.IsHeader = header + cell.Align = columns[col] + } + + // pad it out with empty columns to get the right number + for ; col < len(columns); col++ { + cell := p.addBlock(TableCell, nil) + cell.IsHeader = header + cell.Align = columns[col] + } + + // silently ignore rows with too many cells +} + +// returns blockquote prefix length +func (p *Markdown) quotePrefix(data []byte) int { + i := 0 + for i < 3 && i < len(data) && data[i] == ' ' { + i++ + } + if i < len(data) && data[i] == '>' { + if i+1 < len(data) && data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// blockquote ends with at least one blank line +// followed by something without a blockquote prefix +func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a blockquote fragment +func (p *Markdown) quote(data []byte) int { + block := p.addBlock(BlockQuote, nil) + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. 
While doing that, check for + // fenced code and if one's found, incorporate it altogether, + // irregardless of any contents inside it + for end < len(data) && data[end] != '\n' { + if p.extensions&FencedCode != 0 { + if i := p.fencedCodeBlock(data[end:], false); i > 0 { + // -1 to compensate for the extra end++ after the loop: + end += i - 1 + break + } + } + end++ + } + if end < len(data) && data[end] == '\n' { + end++ + } + if pre := p.quotePrefix(data[beg:]); pre > 0 { + // skip the prefix + beg += pre + } else if p.terminateBlockquote(data, beg, end) { + break + } + // this line is part of the blockquote + raw.Write(data[beg:end]) + beg = end + } + p.block(raw.Bytes()) + p.finalize(block) + return end +} + +// returns prefix length for block code +func (p *Markdown) codePrefix(data []byte) int { + if len(data) >= 1 && data[0] == '\t' { + return 1 + } + if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { + return 4 + } + return 0 +} + +func (p *Markdown) code(data []byte) int { + var work bytes.Buffer + + i := 0 + for i < len(data) { + beg := i + for i < len(data) && data[i] != '\n' { + i++ + } + if i < len(data) && data[i] == '\n' { + i++ + } + + blankline := p.isEmpty(data[beg:i]) > 0 + if pre := p.codePrefix(data[beg:i]); pre > 0 { + beg += pre + } else if !blankline { + // non-empty, non-prefixed line breaks the pre + i = beg + break + } + + // verbatim copy to the working buffer + if blankline { + work.WriteByte('\n') + } else { + work.Write(data[beg:i]) + } + } + + // trim all the \n off the end of work + workbytes := work.Bytes() + eol := len(workbytes) + for eol > 0 && workbytes[eol-1] == '\n' { + eol-- + } + if eol != len(workbytes) { + work.Truncate(eol) + } + + work.WriteByte('\n') + + block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer + block.IsFenced = false + finalizeCodeBlock(block) + + return i +} + +// returns unordered list item prefix +func (p *Markdown) uliPrefix(data []byte) int { + i := 0 + // start with up to 3 spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + if i >= len(data)-1 { + return 0 + } + // need one of {'*', '+', '-'} followed by a space or a tab + if (data[i] != '*' && data[i] != '+' && data[i] != '-') || + (data[i+1] != ' ' && data[i+1] != '\t') { + return 0 + } + return i + 2 +} + +// returns ordered list item prefix +func (p *Markdown) oliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && i < len(data) && data[i] == ' ' { + i++ + } + + // count the digits + start := i + for i < len(data) && data[i] >= '0' && data[i] <= '9' { + i++ + } + if start == i || i >= len(data)-1 { + return 0 + } + + // we need >= 1 digits followed by a dot and a space or a tab + if data[i] != '.' 
|| !(data[i+1] == ' ' || data[i+1] == '\t') { + return 0 + } + return i + 2 +} + +// returns definition list item prefix +func (p *Markdown) dliPrefix(data []byte) int { + if len(data) < 2 { + return 0 + } + i := 0 + // need a ':' followed by a space or a tab + if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') { + return 0 + } + for i < len(data) && data[i] == ' ' { + i++ + } + return i + 2 +} + +// parse ordered or unordered list block +func (p *Markdown) list(data []byte, flags ListType) int { + i := 0 + flags |= ListItemBeginningOfList + block := p.addBlock(List, nil) + block.ListFlags = flags + block.Tight = true + + for i < len(data) { + skip := p.listItem(data[i:], &flags) + if flags&ListItemContainsBlock != 0 { + block.ListData.Tight = false + } + i += skip + if skip == 0 || flags&ListItemEndOfList != 0 { + break + } + flags &= ^ListItemBeginningOfList + } + + above := block.Parent + finalizeList(block) + p.tip = above + return i +} + +// Returns true if the list item is not the same type as its parent list +func (p *Markdown) listTypeChanged(data []byte, flags *ListType) bool { + if p.dliPrefix(data) > 0 && *flags&ListTypeDefinition == 0 { + return true + } else if p.oliPrefix(data) > 0 && *flags&ListTypeOrdered == 0 { + return true + } else if p.uliPrefix(data) > 0 && (*flags&ListTypeOrdered != 0 || *flags&ListTypeDefinition != 0) { + return true + } + return false +} + +// Returns true if block ends with a blank line, descending if needed +// into lists and sublists. +func endsWithBlankLine(block *Node) bool { + // TODO: figure this out. Always false now. + for block != nil { + //if block.lastLineBlank { + //return true + //} + t := block.Type + if t == List || t == Item { + block = block.LastChild + } else { + break + } + } + return false +} + +func finalizeList(block *Node) { + block.open = false + item := block.FirstChild + for item != nil { + // check for non-final list item ending with blank line: + if endsWithBlankLine(item) && item.Next != nil { + block.ListData.Tight = false + break + } + // recurse into children of list item, to see if there are spaces + // between any of them: + subItem := item.FirstChild + for subItem != nil { + if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) { + block.ListData.Tight = false + break + } + subItem = subItem.Next + } + item = item.Next + } +} + +// Parse a single list item. +// Assumes initial prefix is already removed if this is a sublist. 
+func (p *Markdown) listItem(data []byte, flags *ListType) int { + // keep track of the indentation of the first line + itemIndent := 0 + if data[0] == '\t' { + itemIndent += 4 + } else { + for itemIndent < 3 && data[itemIndent] == ' ' { + itemIndent++ + } + } + + var bulletChar byte = '*' + i := p.uliPrefix(data) + if i == 0 { + i = p.oliPrefix(data) + } else { + bulletChar = data[i-2] + } + if i == 0 { + i = p.dliPrefix(data) + // reset definition term flag + if i > 0 { + *flags &= ^ListTypeTerm + } + } + if i == 0 { + // if in definition list, set term flag and continue + if *flags&ListTypeDefinition != 0 { + *flags |= ListTypeTerm + } else { + return 0 + } + } + + // skip leading whitespace on first line + for i < len(data) && data[i] == ' ' { + i++ + } + + // find the end of the line + line := i + for i > 0 && i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[line:i]) + line = i + + // process the following lines + containsBlankLine := false + sublist := 0 + codeBlockMarker := "" + +gatherlines: + for line < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[line:i]) > 0 { + containsBlankLine = true + line = i + continue + } + + // calculate the indentation + indent := 0 + indentIndex := 0 + if data[line] == '\t' { + indentIndex++ + indent += 4 + } else { + for indent < 4 && line+indent < i && data[line+indent] == ' ' { + indent++ + indentIndex++ + } + } + + chunk := data[line+indentIndex : i] + + if p.extensions&FencedCode != 0 { + // determine if in or out of codeblock + // if in codeblock, ignore normal list processing + _, marker := isFenceLine(chunk, nil, codeBlockMarker) + if marker != "" { + if codeBlockMarker == "" { + // start of codeblock + codeBlockMarker = marker + } else { + // end of codeblock. + codeBlockMarker = "" + } + } + // we are in a codeblock, write line, and continue + if codeBlockMarker != "" || marker != "" { + raw.Write(data[line+indentIndex : i]) + line = i + continue gatherlines + } + } + + // evaluate how this line fits in + switch { + // is this a nested list item? + case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || + p.oliPrefix(chunk) > 0 || + p.dliPrefix(chunk) > 0: + + // to be a nested list, it must be indented more + // if not, it is either a different kind of list + // or the next item in the same list + if indent <= itemIndent { + if p.listTypeChanged(chunk, flags) { + *flags |= ListItemEndOfList + } else if containsBlankLine { + *flags |= ListItemContainsBlock + } + + break gatherlines + } + + if containsBlankLine { + *flags |= ListItemContainsBlock + } + + // is this the first item in the nested list? + if sublist == 0 { + sublist = raw.Len() + } + + // is this a nested prefix heading? + case p.isPrefixHeading(chunk): + // if the heading is not indented, it is not nested in the list + // and thus ends the list + if containsBlankLine && indent < 4 { + *flags |= ListItemEndOfList + break gatherlines + } + *flags |= ListItemContainsBlock + + // anything following an empty line is only part + // of this item if it is indented 4 spaces + // (regardless of the indentation of the beginning of the item) + case containsBlankLine && indent < 4: + if *flags&ListTypeDefinition != 0 && i < len(data)-1 { + // is the next item still a part of this list? 
+ next := i + for next < len(data) && data[next] != '\n' { + next++ + } + for next < len(data)-1 && data[next] == '\n' { + next++ + } + if i < len(data)-1 && data[i] != ':' && data[next] != ':' { + *flags |= ListItemEndOfList + } + } else { + *flags |= ListItemEndOfList + } + break gatherlines + + // a blank line means this should be parsed as a block + case containsBlankLine: + raw.WriteByte('\n') + *flags |= ListItemContainsBlock + } + + // if this line was preceded by one or more blanks, + // re-introduce the blank into the buffer + if containsBlankLine { + containsBlankLine = false + raw.WriteByte('\n') + } + + // add the line into the working buffer without prefix + raw.Write(data[line+indentIndex : i]) + + line = i + } + + rawBytes := raw.Bytes() + + block := p.addBlock(Item, nil) + block.ListFlags = *flags + block.Tight = false + block.BulletChar = bulletChar + block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark + + // render the contents of the list item + if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 { + // intermediate render of block item, except for definition term + if sublist > 0 { + p.block(rawBytes[:sublist]) + p.block(rawBytes[sublist:]) + } else { + p.block(rawBytes) + } + } else { + // intermediate render of inline item + if sublist > 0 { + child := p.addChild(Paragraph, 0) + child.content = rawBytes[:sublist] + p.block(rawBytes[sublist:]) + } else { + child := p.addChild(Paragraph, 0) + child.content = rawBytes + } + } + return line +} + +// render a single paragraph that has already been parsed out +func (p *Markdown) renderParagraph(data []byte) { + if len(data) == 0 { + return + } + + // trim leading spaces + beg := 0 + for data[beg] == ' ' { + beg++ + } + + end := len(data) + // trim trailing newline + if data[len(data)-1] == '\n' { + end-- + } + + // trim trailing spaces + for end > beg && data[end-1] == ' ' { + end-- + } + + p.addBlock(Paragraph, data[beg:end]) +} + +func (p *Markdown) paragraph(data []byte) int { + // prev: index of 1st char of previous line + // line: index of 1st char of current line + // i: index of cursor/end of current line + var prev, line, i int + tabSize := TabSizeDefault + if p.extensions&TabSizeEight != 0 { + tabSize = TabSizeDouble + } + // keep going until we find something to mark the end of the paragraph + for i < len(data) { + // mark the beginning of the current line + prev = line + current := data[i:] + line = i + + // did we find a reference or a footnote? If so, end a paragraph + // preceding it and report that we have consumed up to the end of that + // reference: + if refEnd := isReference(p, current, tabSize); refEnd > 0 { + p.renderParagraph(data[:i]) + return i + refEnd + } + + // did we find a blank line marking the end of the paragraph? + if n := p.isEmpty(current); n > 0 { + // did this blank line followed by a definition list item? 
+ if p.extensions&DefinitionLists != 0 { + if i < len(data)-1 && data[i+1] == ':' { + return p.list(data[prev:], ListTypeDefinition) + } + } + + p.renderParagraph(data[:i]) + return i + n + } + + // an underline under some text marks a heading, so our paragraph ended on prev line + if i > 0 { + if level := p.isUnderlinedHeading(current); level > 0 { + // render the paragraph + p.renderParagraph(data[:prev]) + + // ignore leading and trailing whitespace + eol := i - 1 + for prev < eol && data[prev] == ' ' { + prev++ + } + for eol > prev && data[eol-1] == ' ' { + eol-- + } + + id := "" + if p.extensions&AutoHeadingIDs != 0 { + id = SanitizedAnchorName(string(data[prev:eol])) + } + + block := p.addBlock(Heading, data[prev:eol]) + block.Level = level + block.HeadingID = id + + // find the end of the underline + for i < len(data) && data[i] != '\n' { + i++ + } + return i + } + } + + // if the next line starts a block of HTML, then the paragraph ends here + if p.extensions&LaxHTMLBlocks != 0 { + if data[i] == '<' && p.html(current, false) > 0 { + // rewind to before the HTML block + p.renderParagraph(data[:i]) + return i + } + } + + // if there's a prefixed heading or a horizontal rule after this, paragraph is over + if p.isPrefixHeading(current) || p.isHRule(current) { + p.renderParagraph(data[:i]) + return i + } + + // if there's a fenced code block, paragraph is over + if p.extensions&FencedCode != 0 { + if p.fencedCodeBlock(current, false) > 0 { + p.renderParagraph(data[:i]) + return i + } + } + + // if there's a definition list item, prev line is a definition term + if p.extensions&DefinitionLists != 0 { + if p.dliPrefix(current) != 0 { + ret := p.list(data[prev:], ListTypeDefinition) + return ret + } + } + + // if there's a list after this, paragraph is over + if p.extensions&NoEmptyLineBeforeBlock != 0 { + if p.uliPrefix(current) != 0 || + p.oliPrefix(current) != 0 || + p.quotePrefix(current) != 0 || + p.codePrefix(current) != 0 { + p.renderParagraph(data[:i]) + return i + } + } + + // otherwise, scan to the beginning of the next line + nl := bytes.IndexByte(data[i:], '\n') + if nl >= 0 { + i += nl + 1 + } else { + i += len(data[i:]) + } + } + + p.renderParagraph(data[:i]) + return i +} + +func skipChar(data []byte, start int, char byte) int { + i := start + for i < len(data) && data[i] == char { + i++ + } + return i +} + +func skipUntilChar(text []byte, start int, char byte) int { + i := start + for i < len(text) && text[i] != char { + i++ + } + return i +} + +// SanitizedAnchorName returns a sanitized anchor name for the given text. +// +// It implements the algorithm specified in the package comment. +func SanitizedAnchorName(text string) string { + var anchorName []rune + futureDash := false + for _, r := range text { + switch { + case unicode.IsLetter(r) || unicode.IsNumber(r): + if futureDash && len(anchorName) > 0 { + anchorName = append(anchorName, '-') + } + futureDash = false + anchorName = append(anchorName, unicode.ToLower(r)) + default: + futureDash = true + } + } + return string(anchorName) +} diff --git a/vendor/github.com/russross/blackfriday/v2/doc.go b/vendor/github.com/russross/blackfriday/v2/doc.go new file mode 100644 index 000000000..57ff152a0 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/doc.go @@ -0,0 +1,46 @@ +// Package blackfriday is a markdown processor. 
+// +// It translates plain text with simple formatting rules into an AST, which can +// then be further processed to HTML (provided by Blackfriday itself) or other +// formats (provided by the community). +// +// The simplest way to invoke Blackfriday is to call the Run function. It will +// take a text input and produce a text output in HTML (or other format). +// +// A slightly more sophisticated way to use Blackfriday is to create a Markdown +// processor and to call Parse, which returns a syntax tree for the input +// document. You can leverage Blackfriday's parsing for content extraction from +// markdown documents. You can assign a custom renderer and set various options +// to the Markdown processor. +// +// If you're interested in calling Blackfriday from command line, see +// https://github.com/russross/blackfriday-tool. +// +// Sanitized Anchor Names +// +// Blackfriday includes an algorithm for creating sanitized anchor names +// corresponding to a given input text. This algorithm is used to create +// anchors for headings when AutoHeadingIDs extension is enabled. The +// algorithm is specified below, so that other packages can create +// compatible anchor names and links to those anchors. +// +// The algorithm iterates over the input text, interpreted as UTF-8, +// one Unicode code point (rune) at a time. All runes that are letters (category L) +// or numbers (category N) are considered valid characters. They are mapped to +// lower case, and included in the output. All other runes are considered +// invalid characters. Invalid characters that precede the first valid character, +// as well as invalid character that follow the last valid character +// are dropped completely. All other sequences of invalid characters +// between two valid characters are replaced with a single dash character '-'. +// +// SanitizedAnchorName exposes this functionality, and can be used to +// create compatible links to the anchor names generated by blackfriday. +// This algorithm is also implemented in a small standalone package at +// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients +// that want a small package and don't need full functionality of blackfriday. +package blackfriday + +// NOTE: Keep Sanitized Anchor Name algorithm in sync with package +// github.com/shurcooL/sanitized_anchor_name. +// Otherwise, users of sanitized_anchor_name will get anchor names +// that are incompatible with those generated by blackfriday. 
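doc.go above suggests creating a Markdown processor and calling Parse when you want content extraction rather than rendered HTML. The following sketch (an editorial aside, not part of the vendored sources) walks the returned AST and lists the headings; the sample document and the enabled extensions are assumptions:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	input := []byte("# Title\n\nBody text.\n\n## Section One\n")

	// AutoHeadingIDs makes the parser fill in HeadingID using the
	// sanitized-anchor-name algorithm described above.
	md := blackfriday.New(blackfriday.WithExtensions(
		blackfriday.CommonExtensions | blackfriday.AutoHeadingIDs))
	doc := md.Parse(input)

	// Visit every node once on entry and report the headings.
	doc.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		if entering && node.Type == blackfriday.Heading {
			fmt.Printf("level %d, anchor %q\n", node.Level, node.HeadingID)
		}
		return blackfriday.GoToNext
	})
}
```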
diff --git a/vendor/github.com/russross/blackfriday/v2/entities.go b/vendor/github.com/russross/blackfriday/v2/entities.go new file mode 100644 index 000000000..a2c3edb69 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/entities.go @@ -0,0 +1,2236 @@ +package blackfriday + +// Extracted from https://html.spec.whatwg.org/multipage/entities.json +var entities = map[string]bool{ + "Æ": true, + "Æ": true, + "&": true, + "&": true, + "Á": true, + "Á": true, + "Ă": true, + "Â": true, + "Â": true, + "А": true, + "𝔄": true, + "À": true, + "À": true, + "Α": true, + "Ā": true, + "⩓": true, + "Ą": true, + "𝔸": true, + "⁡": true, + "Å": true, + "Å": true, + "𝒜": true, + "≔": true, + "Ã": true, + "Ã": true, + "Ä": true, + "Ä": true, + "∖": true, + "⫧": true, + "⌆": true, + "Б": true, + "∵": true, + "ℬ": true, + "Β": true, + "𝔅": true, + "𝔹": true, + "˘": true, + "ℬ": true, + "≎": true, + "Ч": true, + "©": true, + "©": true, + "Ć": true, + "⋒": true, + "ⅅ": true, + "ℭ": true, + "Č": true, + "Ç": true, + "Ç": true, + "Ĉ": true, + "∰": true, + "Ċ": true, + "¸": true, + "·": true, + "ℭ": true, + "Χ": true, + "⊙": true, + "⊖": true, + "⊕": true, + "⊗": true, + "∲": true, + "”": true, + "’": true, + "∷": true, + "⩴": true, + "≡": true, + "∯": true, + "∮": true, + "ℂ": true, + "∐": true, + "∳": true, + "⨯": true, + "𝒞": true, + "⋓": true, + "≍": true, + "ⅅ": true, + "⤑": true, + "Ђ": true, + "Ѕ": true, + "Џ": true, + "‡": true, + "↡": true, + "⫤": true, + "Ď": true, + "Д": true, + "∇": true, + "Δ": true, + "𝔇": true, + "´": true, + "˙": true, + "˝": true, + "`": true, + "˜": true, + "⋄": true, + "ⅆ": true, + "𝔻": true, + "¨": true, + "⃜": true, + "≐": true, + "∯": true, + "¨": true, + "⇓": true, + "⇐": true, + "⇔": true, + "⫤": true, + "⟸": true, + "⟺": true, + "⟹": true, + "⇒": true, + "⊨": true, + "⇑": true, + "⇕": true, + "∥": true, + "↓": true, + "⤓": true, + "⇵": true, + "̑": true, + "⥐": true, + "⥞": true, + "↽": true, + "⥖": true, + "⥟": true, + "⇁": true, + "⥗": true, + "⊤": true, + "↧": true, + "⇓": true, + "𝒟": true, + "Đ": true, + "Ŋ": true, + "Ð": true, + "Ð": true, + "É": true, + "É": true, + "Ě": true, + "Ê": true, + "Ê": true, + "Э": true, + "Ė": true, + "𝔈": true, + "È": true, + "È": true, + "∈": true, + "Ē": true, + "◻": true, + "▫": true, + "Ę": true, + "𝔼": true, + "Ε": true, + "⩵": true, + "≂": true, + "⇌": true, + "ℰ": true, + "⩳": true, + "Η": true, + "Ë": true, + "Ë": true, + "∃": true, + "ⅇ": true, + "Ф": true, + "𝔉": true, + "◼": true, + "▪": true, + "𝔽": true, + "∀": true, + "ℱ": true, + "ℱ": true, + "Ѓ": true, + ">": true, + ">": true, + "Γ": true, + "Ϝ": true, + "Ğ": true, + "Ģ": true, + "Ĝ": true, + "Г": true, + "Ġ": true, + "𝔊": true, + "⋙": true, + "𝔾": true, + "≥": true, + "⋛": true, + "≧": true, + "⪢": true, + "≷": true, + "⩾": true, + "≳": true, + "𝒢": true, + "≫": true, + "Ъ": true, + "ˇ": true, + "^": true, + "Ĥ": true, + "ℌ": true, + "ℋ": true, + "ℍ": true, + "─": true, + "ℋ": true, + "Ħ": true, + "≎": true, + "≏": true, + "Е": true, + "IJ": true, + "Ё": true, + "Í": true, + "Í": true, + "Î": true, + "Î": true, + "И": true, + "İ": true, + "ℑ": true, + "Ì": true, + "Ì": true, + "ℑ": true, + "Ī": true, + "ⅈ": true, + "⇒": true, + "∬": true, + "∫": true, + "⋂": true, + "⁣": true, + "⁢": true, + "Į": true, + "𝕀": true, + "Ι": true, + "ℐ": true, + "Ĩ": true, + "І": true, + "Ï": true, + "Ï": true, + "Ĵ": true, + "Й": true, + "𝔍": true, + "𝕁": true, + "𝒥": true, + "Ј": true, + "Є": true, + "Х": true, + "Ќ": true, + "Κ": true, + "Ķ": true, + "К": true, + "𝔎": true, + 
"𝕂": true, + "𝒦": true, + "Љ": true, + "<": true, + "<": true, + "Ĺ": true, + "Λ": true, + "⟪": true, + "ℒ": true, + "↞": true, + "Ľ": true, + "Ļ": true, + "Л": true, + "⟨": true, + "←": true, + "⇤": true, + "⇆": true, + "⌈": true, + "⟦": true, + "⥡": true, + "⇃": true, + "⥙": true, + "⌊": true, + "↔": true, + "⥎": true, + "⊣": true, + "↤": true, + "⥚": true, + "⊲": true, + "⧏": true, + "⊴": true, + "⥑": true, + "⥠": true, + "↿": true, + "⥘": true, + "↼": true, + "⥒": true, + "⇐": true, + "⇔": true, + "⋚": true, + "≦": true, + "≶": true, + "⪡": true, + "⩽": true, + "≲": true, + "𝔏": true, + "⋘": true, + "⇚": true, + "Ŀ": true, + "⟵": true, + "⟷": true, + "⟶": true, + "⟸": true, + "⟺": true, + "⟹": true, + "𝕃": true, + "↙": true, + "↘": true, + "ℒ": true, + "↰": true, + "Ł": true, + "≪": true, + "⤅": true, + "М": true, + " ": true, + "ℳ": true, + "𝔐": true, + "∓": true, + "𝕄": true, + "ℳ": true, + "Μ": true, + "Њ": true, + "Ń": true, + "Ň": true, + "Ņ": true, + "Н": true, + "​": true, + "​": true, + "​": true, + "​": true, + "≫": true, + "≪": true, + " ": true, + "𝔑": true, + "⁠": true, + " ": true, + "ℕ": true, + "⫬": true, + "≢": true, + "≭": true, + "∦": true, + "∉": true, + "≠": true, + "≂̸": true, + "∄": true, + "≯": true, + "≱": true, + "≧̸": true, + "≫̸": true, + "≹": true, + "⩾̸": true, + "≵": true, + "≎̸": true, + "≏̸": true, + "⋪": true, + "⧏̸": true, + "⋬": true, + "≮": true, + "≰": true, + "≸": true, + "≪̸": true, + "⩽̸": true, + "≴": true, + "⪢̸": true, + "⪡̸": true, + "⊀": true, + "⪯̸": true, + "⋠": true, + "∌": true, + "⋫": true, + "⧐̸": true, + "⋭": true, + "⊏̸": true, + "⋢": true, + "⊐̸": true, + "⋣": true, + "⊂⃒": true, + "⊈": true, + "⊁": true, + "⪰̸": true, + "⋡": true, + "≿̸": true, + "⊃⃒": true, + "⊉": true, + "≁": true, + "≄": true, + "≇": true, + "≉": true, + "∤": true, + "𝒩": true, + "Ñ": true, + "Ñ": true, + "Ν": true, + "Œ": true, + "Ó": true, + "Ó": true, + "Ô": true, + "Ô": true, + "О": true, + "Ő": true, + "𝔒": true, + "Ò": true, + "Ò": true, + "Ō": true, + "Ω": true, + "Ο": true, + "𝕆": true, + "“": true, + "‘": true, + "⩔": true, + "𝒪": true, + "Ø": true, + "Ø": true, + "Õ": true, + "Õ": true, + "⨷": true, + "Ö": true, + "Ö": true, + "‾": true, + "⏞": true, + "⎴": true, + "⏜": true, + "∂": true, + "П": true, + "𝔓": true, + "Φ": true, + "Π": true, + "±": true, + "ℌ": true, + "ℙ": true, + "⪻": true, + "≺": true, + "⪯": true, + "≼": true, + "≾": true, + "″": true, + "∏": true, + "∷": true, + "∝": true, + "𝒫": true, + "Ψ": true, + """: true, + """: true, + "𝔔": true, + "ℚ": true, + "𝒬": true, + "⤐": true, + "®": true, + "®": true, + "Ŕ": true, + "⟫": true, + "↠": true, + "⤖": true, + "Ř": true, + "Ŗ": true, + "Р": true, + "ℜ": true, + "∋": true, + "⇋": true, + "⥯": true, + "ℜ": true, + "Ρ": true, + "⟩": true, + "→": true, + "⇥": true, + "⇄": true, + "⌉": true, + "⟧": true, + "⥝": true, + "⇂": true, + "⥕": true, + "⌋": true, + "⊢": true, + "↦": true, + "⥛": true, + "⊳": true, + "⧐": true, + "⊵": true, + "⥏": true, + "⥜": true, + "↾": true, + "⥔": true, + "⇀": true, + "⥓": true, + "⇒": true, + "ℝ": true, + "⥰": true, + "⇛": true, + "ℛ": true, + "↱": true, + "⧴": true, + "Щ": true, + "Ш": true, + "Ь": true, + "Ś": true, + "⪼": true, + "Š": true, + "Ş": true, + "Ŝ": true, + "С": true, + "𝔖": true, + "↓": true, + "←": true, + "→": true, + "↑": true, + "Σ": true, + "∘": true, + "𝕊": true, + "√": true, + "□": true, + "⊓": true, + "⊏": true, + "⊑": true, + "⊐": true, + "⊒": true, + "⊔": true, + "𝒮": true, + "⋆": true, + "⋐": true, + "⋐": true, + "⊆": true, + "≻": true, + 
"⪰": true, + "≽": true, + "≿": true, + "∋": true, + "∑": true, + "⋑": true, + "⊃": true, + "⊇": true, + "⋑": true, + "Þ": true, + "Þ": true, + "™": true, + "Ћ": true, + "Ц": true, + " ": true, + "Τ": true, + "Ť": true, + "Ţ": true, + "Т": true, + "𝔗": true, + "∴": true, + "Θ": true, + "  ": true, + " ": true, + "∼": true, + "≃": true, + "≅": true, + "≈": true, + "𝕋": true, + "⃛": true, + "𝒯": true, + "Ŧ": true, + "Ú": true, + "Ú": true, + "↟": true, + "⥉": true, + "Ў": true, + "Ŭ": true, + "Û": true, + "Û": true, + "У": true, + "Ű": true, + "𝔘": true, + "Ù": true, + "Ù": true, + "Ū": true, + "_": true, + "⏟": true, + "⎵": true, + "⏝": true, + "⋃": true, + "⊎": true, + "Ų": true, + "𝕌": true, + "↑": true, + "⤒": true, + "⇅": true, + "↕": true, + "⥮": true, + "⊥": true, + "↥": true, + "⇑": true, + "⇕": true, + "↖": true, + "↗": true, + "ϒ": true, + "Υ": true, + "Ů": true, + "𝒰": true, + "Ũ": true, + "Ü": true, + "Ü": true, + "⊫": true, + "⫫": true, + "В": true, + "⊩": true, + "⫦": true, + "⋁": true, + "‖": true, + "‖": true, + "∣": true, + "|": true, + "❘": true, + "≀": true, + " ": true, + "𝔙": true, + "𝕍": true, + "𝒱": true, + "⊪": true, + "Ŵ": true, + "⋀": true, + "𝔚": true, + "𝕎": true, + "𝒲": true, + "𝔛": true, + "Ξ": true, + "𝕏": true, + "𝒳": true, + "Я": true, + "Ї": true, + "Ю": true, + "Ý": true, + "Ý": true, + "Ŷ": true, + "Ы": true, + "𝔜": true, + "𝕐": true, + "𝒴": true, + "Ÿ": true, + "Ж": true, + "Ź": true, + "Ž": true, + "З": true, + "Ż": true, + "​": true, + "Ζ": true, + "ℨ": true, + "ℤ": true, + "𝒵": true, + "á": true, + "á": true, + "ă": true, + "∾": true, + "∾̳": true, + "∿": true, + "â": true, + "â": true, + "´": true, + "´": true, + "а": true, + "æ": true, + "æ": true, + "⁡": true, + "𝔞": true, + "à": true, + "à": true, + "ℵ": true, + "ℵ": true, + "α": true, + "ā": true, + "⨿": true, + "&": true, + "&": true, + "∧": true, + "⩕": true, + "⩜": true, + "⩘": true, + "⩚": true, + "∠": true, + "⦤": true, + "∠": true, + "∡": true, + "⦨": true, + "⦩": true, + "⦪": true, + "⦫": true, + "⦬": true, + "⦭": true, + "⦮": true, + "⦯": true, + "∟": true, + "⊾": true, + "⦝": true, + "∢": true, + "Å": true, + "⍼": true, + "ą": true, + "𝕒": true, + "≈": true, + "⩰": true, + "⩯": true, + "≊": true, + "≋": true, + "'": true, + "≈": true, + "≊": true, + "å": true, + "å": true, + "𝒶": true, + "*": true, + "≈": true, + "≍": true, + "ã": true, + "ã": true, + "ä": true, + "ä": true, + "∳": true, + "⨑": true, + "⫭": true, + "≌": true, + "϶": true, + "‵": true, + "∽": true, + "⋍": true, + "⊽": true, + "⌅": true, + "⌅": true, + "⎵": true, + "⎶": true, + "≌": true, + "б": true, + "„": true, + "∵": true, + "∵": true, + "⦰": true, + "϶": true, + "ℬ": true, + "β": true, + "ℶ": true, + "≬": true, + "𝔟": true, + "⋂": true, + "◯": true, + "⋃": true, + "⨀": true, + "⨁": true, + "⨂": true, + "⨆": true, + "★": true, + "▽": true, + "△": true, + "⨄": true, + "⋁": true, + "⋀": true, + "⤍": true, + "⧫": true, + "▪": true, + "▴": true, + "▾": true, + "◂": true, + "▸": true, + "␣": true, + "▒": true, + "░": true, + "▓": true, + "█": true, + "=⃥": true, + "≡⃥": true, + "⌐": true, + "𝕓": true, + "⊥": true, + "⊥": true, + "⋈": true, + "╗": true, + "╔": true, + "╖": true, + "╓": true, + "═": true, + "╦": true, + "╩": true, + "╤": true, + "╧": true, + "╝": true, + "╚": true, + "╜": true, + "╙": true, + "║": true, + "╬": true, + "╣": true, + "╠": true, + "╫": true, + "╢": true, + "╟": true, + "⧉": true, + "╕": true, + "╒": true, + "┐": true, + "┌": true, + "─": true, + "╥": true, + "╨": true, + "┬": true, + "┴": true, + 
"⊟": true, + "⊞": true, + "⊠": true, + "╛": true, + "╘": true, + "┘": true, + "└": true, + "│": true, + "╪": true, + "╡": true, + "╞": true, + "┼": true, + "┤": true, + "├": true, + "‵": true, + "˘": true, + "¦": true, + "¦": true, + "𝒷": true, + "⁏": true, + "∽": true, + "⋍": true, + "\": true, + "⧅": true, + "⟈": true, + "•": true, + "•": true, + "≎": true, + "⪮": true, + "≏": true, + "≏": true, + "ć": true, + "∩": true, + "⩄": true, + "⩉": true, + "⩋": true, + "⩇": true, + "⩀": true, + "∩︀": true, + "⁁": true, + "ˇ": true, + "⩍": true, + "č": true, + "ç": true, + "ç": true, + "ĉ": true, + "⩌": true, + "⩐": true, + "ċ": true, + "¸": true, + "¸": true, + "⦲": true, + "¢": true, + "¢": true, + "·": true, + "𝔠": true, + "ч": true, + "✓": true, + "✓": true, + "χ": true, + "○": true, + "⧃": true, + "ˆ": true, + "≗": true, + "↺": true, + "↻": true, + "®": true, + "Ⓢ": true, + "⊛": true, + "⊚": true, + "⊝": true, + "≗": true, + "⨐": true, + "⫯": true, + "⧂": true, + "♣": true, + "♣": true, + ":": true, + "≔": true, + "≔": true, + ",": true, + "@": true, + "∁": true, + "∘": true, + "∁": true, + "ℂ": true, + "≅": true, + "⩭": true, + "∮": true, + "𝕔": true, + "∐": true, + "©": true, + "©": true, + "℗": true, + "↵": true, + "✗": true, + "𝒸": true, + "⫏": true, + "⫑": true, + "⫐": true, + "⫒": true, + "⋯": true, + "⤸": true, + "⤵": true, + "⋞": true, + "⋟": true, + "↶": true, + "⤽": true, + "∪": true, + "⩈": true, + "⩆": true, + "⩊": true, + "⊍": true, + "⩅": true, + "∪︀": true, + "↷": true, + "⤼": true, + "⋞": true, + "⋟": true, + "⋎": true, + "⋏": true, + "¤": true, + "¤": true, + "↶": true, + "↷": true, + "⋎": true, + "⋏": true, + "∲": true, + "∱": true, + "⌭": true, + "⇓": true, + "⥥": true, + "†": true, + "ℸ": true, + "↓": true, + "‐": true, + "⊣": true, + "⤏": true, + "˝": true, + "ď": true, + "д": true, + "ⅆ": true, + "‡": true, + "⇊": true, + "⩷": true, + "°": true, + "°": true, + "δ": true, + "⦱": true, + "⥿": true, + "𝔡": true, + "⇃": true, + "⇂": true, + "⋄": true, + "⋄": true, + "♦": true, + "♦": true, + "¨": true, + "ϝ": true, + "⋲": true, + "÷": true, + "÷": true, + "÷": true, + "⋇": true, + "⋇": true, + "ђ": true, + "⌞": true, + "⌍": true, + "$": true, + "𝕕": true, + "˙": true, + "≐": true, + "≑": true, + "∸": true, + "∔": true, + "⊡": true, + "⌆": true, + "↓": true, + "⇊": true, + "⇃": true, + "⇂": true, + "⤐": true, + "⌟": true, + "⌌": true, + "𝒹": true, + "ѕ": true, + "⧶": true, + "đ": true, + "⋱": true, + "▿": true, + "▾": true, + "⇵": true, + "⥯": true, + "⦦": true, + "џ": true, + "⟿": true, + "⩷": true, + "≑": true, + "é": true, + "é": true, + "⩮": true, + "ě": true, + "≖": true, + "ê": true, + "ê": true, + "≕": true, + "э": true, + "ė": true, + "ⅇ": true, + "≒": true, + "𝔢": true, + "⪚": true, + "è": true, + "è": true, + "⪖": true, + "⪘": true, + "⪙": true, + "⏧": true, + "ℓ": true, + "⪕": true, + "⪗": true, + "ē": true, + "∅": true, + "∅": true, + "∅": true, + " ": true, + " ": true, + " ": true, + "ŋ": true, + " ": true, + "ę": true, + "𝕖": true, + "⋕": true, + "⧣": true, + "⩱": true, + "ε": true, + "ε": true, + "ϵ": true, + "≖": true, + "≕": true, + "≂": true, + "⪖": true, + "⪕": true, + "=": true, + "≟": true, + "≡": true, + "⩸": true, + "⧥": true, + "≓": true, + "⥱": true, + "ℯ": true, + "≐": true, + "≂": true, + "η": true, + "ð": true, + "ð": true, + "ë": true, + "ë": true, + "€": true, + "!": true, + "∃": true, + "ℰ": true, + "ⅇ": true, + "≒": true, + "ф": true, + "♀": true, + "ffi": true, + "ff": true, + "ffl": true, + "𝔣": true, + "fi": true, + "fj": true, + "♭": 
true, + "fl": true, + "▱": true, + "ƒ": true, + "𝕗": true, + "∀": true, + "⋔": true, + "⫙": true, + "⨍": true, + "½": true, + "½": true, + "⅓": true, + "¼": true, + "¼": true, + "⅕": true, + "⅙": true, + "⅛": true, + "⅔": true, + "⅖": true, + "¾": true, + "¾": true, + "⅗": true, + "⅜": true, + "⅘": true, + "⅚": true, + "⅝": true, + "⅞": true, + "⁄": true, + "⌢": true, + "𝒻": true, + "≧": true, + "⪌": true, + "ǵ": true, + "γ": true, + "ϝ": true, + "⪆": true, + "ğ": true, + "ĝ": true, + "г": true, + "ġ": true, + "≥": true, + "⋛": true, + "≥": true, + "≧": true, + "⩾": true, + "⩾": true, + "⪩": true, + "⪀": true, + "⪂": true, + "⪄": true, + "⋛︀": true, + "⪔": true, + "𝔤": true, + "≫": true, + "⋙": true, + "ℷ": true, + "ѓ": true, + "≷": true, + "⪒": true, + "⪥": true, + "⪤": true, + "≩": true, + "⪊": true, + "⪊": true, + "⪈": true, + "⪈": true, + "≩": true, + "⋧": true, + "𝕘": true, + "`": true, + "ℊ": true, + "≳": true, + "⪎": true, + "⪐": true, + ">": true, + ">": true, + "⪧": true, + "⩺": true, + "⋗": true, + "⦕": true, + "⩼": true, + "⪆": true, + "⥸": true, + "⋗": true, + "⋛": true, + "⪌": true, + "≷": true, + "≳": true, + "≩︀": true, + "≩︀": true, + "⇔": true, + " ": true, + "½": true, + "ℋ": true, + "ъ": true, + "↔": true, + "⥈": true, + "↭": true, + "ℏ": true, + "ĥ": true, + "♥": true, + "♥": true, + "…": true, + "⊹": true, + "𝔥": true, + "⤥": true, + "⤦": true, + "⇿": true, + "∻": true, + "↩": true, + "↪": true, + "𝕙": true, + "―": true, + "𝒽": true, + "ℏ": true, + "ħ": true, + "⁃": true, + "‐": true, + "í": true, + "í": true, + "⁣": true, + "î": true, + "î": true, + "и": true, + "е": true, + "¡": true, + "¡": true, + "⇔": true, + "𝔦": true, + "ì": true, + "ì": true, + "ⅈ": true, + "⨌": true, + "∭": true, + "⧜": true, + "℩": true, + "ij": true, + "ī": true, + "ℑ": true, + "ℐ": true, + "ℑ": true, + "ı": true, + "⊷": true, + "Ƶ": true, + "∈": true, + "℅": true, + "∞": true, + "⧝": true, + "ı": true, + "∫": true, + "⊺": true, + "ℤ": true, + "⊺": true, + "⨗": true, + "⨼": true, + "ё": true, + "į": true, + "𝕚": true, + "ι": true, + "⨼": true, + "¿": true, + "¿": true, + "𝒾": true, + "∈": true, + "⋹": true, + "⋵": true, + "⋴": true, + "⋳": true, + "∈": true, + "⁢": true, + "ĩ": true, + "і": true, + "ï": true, + "ï": true, + "ĵ": true, + "й": true, + "𝔧": true, + "ȷ": true, + "𝕛": true, + "𝒿": true, + "ј": true, + "є": true, + "κ": true, + "ϰ": true, + "ķ": true, + "к": true, + "𝔨": true, + "ĸ": true, + "х": true, + "ќ": true, + "𝕜": true, + "𝓀": true, + "⇚": true, + "⇐": true, + "⤛": true, + "⤎": true, + "≦": true, + "⪋": true, + "⥢": true, + "ĺ": true, + "⦴": true, + "ℒ": true, + "λ": true, + "⟨": true, + "⦑": true, + "⟨": true, + "⪅": true, + "«": true, + "«": true, + "←": true, + "⇤": true, + "⤟": true, + "⤝": true, + "↩": true, + "↫": true, + "⤹": true, + "⥳": true, + "↢": true, + "⪫": true, + "⤙": true, + "⪭": true, + "⪭︀": true, + "⤌": true, + "❲": true, + "{": true, + "[": true, + "⦋": true, + "⦏": true, + "⦍": true, + "ľ": true, + "ļ": true, + "⌈": true, + "{": true, + "л": true, + "⤶": true, + "“": true, + "„": true, + "⥧": true, + "⥋": true, + "↲": true, + "≤": true, + "←": true, + "↢": true, + "↽": true, + "↼": true, + "⇇": true, + "↔": true, + "⇆": true, + "⇋": true, + "↭": true, + "⋋": true, + "⋚": true, + "≤": true, + "≦": true, + "⩽": true, + "⩽": true, + "⪨": true, + "⩿": true, + "⪁": true, + "⪃": true, + "⋚︀": true, + "⪓": true, + "⪅": true, + "⋖": true, + "⋚": true, + "⪋": true, + "≶": true, + "≲": true, + "⥼": true, + "⌊": true, + "𝔩": true, + "≶": true, + "⪑": true, + 
"↽": true, + "↼": true, + "⥪": true, + "▄": true, + "љ": true, + "≪": true, + "⇇": true, + "⌞": true, + "⥫": true, + "◺": true, + "ŀ": true, + "⎰": true, + "⎰": true, + "≨": true, + "⪉": true, + "⪉": true, + "⪇": true, + "⪇": true, + "≨": true, + "⋦": true, + "⟬": true, + "⇽": true, + "⟦": true, + "⟵": true, + "⟷": true, + "⟼": true, + "⟶": true, + "↫": true, + "↬": true, + "⦅": true, + "𝕝": true, + "⨭": true, + "⨴": true, + "∗": true, + "_": true, + "◊": true, + "◊": true, + "⧫": true, + "(": true, + "⦓": true, + "⇆": true, + "⌟": true, + "⇋": true, + "⥭": true, + "‎": true, + "⊿": true, + "‹": true, + "𝓁": true, + "↰": true, + "≲": true, + "⪍": true, + "⪏": true, + "[": true, + "‘": true, + "‚": true, + "ł": true, + "<": true, + "<": true, + "⪦": true, + "⩹": true, + "⋖": true, + "⋋": true, + "⋉": true, + "⥶": true, + "⩻": true, + "⦖": true, + "◃": true, + "⊴": true, + "◂": true, + "⥊": true, + "⥦": true, + "≨︀": true, + "≨︀": true, + "∺": true, + "¯": true, + "¯": true, + "♂": true, + "✠": true, + "✠": true, + "↦": true, + "↦": true, + "↧": true, + "↤": true, + "↥": true, + "▮": true, + "⨩": true, + "м": true, + "—": true, + "∡": true, + "𝔪": true, + "℧": true, + "µ": true, + "µ": true, + "∣": true, + "*": true, + "⫰": true, + "·": true, + "·": true, + "−": true, + "⊟": true, + "∸": true, + "⨪": true, + "⫛": true, + "…": true, + "∓": true, + "⊧": true, + "𝕞": true, + "∓": true, + "𝓂": true, + "∾": true, + "μ": true, + "⊸": true, + "⊸": true, + "⋙̸": true, + "≫⃒": true, + "≫̸": true, + "⇍": true, + "⇎": true, + "⋘̸": true, + "≪⃒": true, + "≪̸": true, + "⇏": true, + "⊯": true, + "⊮": true, + "∇": true, + "ń": true, + "∠⃒": true, + "≉": true, + "⩰̸": true, + "≋̸": true, + "ʼn": true, + "≉": true, + "♮": true, + "♮": true, + "ℕ": true, + " ": true, + " ": true, + "≎̸": true, + "≏̸": true, + "⩃": true, + "ň": true, + "ņ": true, + "≇": true, + "⩭̸": true, + "⩂": true, + "н": true, + "–": true, + "≠": true, + "⇗": true, + "⤤": true, + "↗": true, + "↗": true, + "≐̸": true, + "≢": true, + "⤨": true, + "≂̸": true, + "∄": true, + "∄": true, + "𝔫": true, + "≧̸": true, + "≱": true, + "≱": true, + "≧̸": true, + "⩾̸": true, + "⩾̸": true, + "≵": true, + "≯": true, + "≯": true, + "⇎": true, + "↮": true, + "⫲": true, + "∋": true, + "⋼": true, + "⋺": true, + "∋": true, + "њ": true, + "⇍": true, + "≦̸": true, + "↚": true, + "‥": true, + "≰": true, + "↚": true, + "↮": true, + "≰": true, + "≦̸": true, + "⩽̸": true, + "⩽̸": true, + "≮": true, + "≴": true, + "≮": true, + "⋪": true, + "⋬": true, + "∤": true, + "𝕟": true, + "¬": true, + "¬": true, + "∉": true, + "⋹̸": true, + "⋵̸": true, + "∉": true, + "⋷": true, + "⋶": true, + "∌": true, + "∌": true, + "⋾": true, + "⋽": true, + "∦": true, + "∦": true, + "⫽⃥": true, + "∂̸": true, + "⨔": true, + "⊀": true, + "⋠": true, + "⪯̸": true, + "⊀": true, + "⪯̸": true, + "⇏": true, + "↛": true, + "⤳̸": true, + "↝̸": true, + "↛": true, + "⋫": true, + "⋭": true, + "⊁": true, + "⋡": true, + "⪰̸": true, + "𝓃": true, + "∤": true, + "∦": true, + "≁": true, + "≄": true, + "≄": true, + "∤": true, + "∦": true, + "⋢": true, + "⋣": true, + "⊄": true, + "⫅̸": true, + "⊈": true, + "⊂⃒": true, + "⊈": true, + "⫅̸": true, + "⊁": true, + "⪰̸": true, + "⊅": true, + "⫆̸": true, + "⊉": true, + "⊃⃒": true, + "⊉": true, + "⫆̸": true, + "≹": true, + "ñ": true, + "ñ": true, + "≸": true, + "⋪": true, + "⋬": true, + "⋫": true, + "⋭": true, + "ν": true, + "#": true, + "№": true, + " ": true, + "⊭": true, + "⤄": true, + "≍⃒": true, + "⊬": true, + "≥⃒": true, + ">⃒": true, + "⧞": true, + "⤂": true, + 
"≤⃒": true, + "<⃒": true, + "⊴⃒": true, + "⤃": true, + "⊵⃒": true, + "∼⃒": true, + "⇖": true, + "⤣": true, + "↖": true, + "↖": true, + "⤧": true, + "Ⓢ": true, + "ó": true, + "ó": true, + "⊛": true, + "⊚": true, + "ô": true, + "ô": true, + "о": true, + "⊝": true, + "ő": true, + "⨸": true, + "⊙": true, + "⦼": true, + "œ": true, + "⦿": true, + "𝔬": true, + "˛": true, + "ò": true, + "ò": true, + "⧁": true, + "⦵": true, + "Ω": true, + "∮": true, + "↺": true, + "⦾": true, + "⦻": true, + "‾": true, + "⧀": true, + "ō": true, + "ω": true, + "ο": true, + "⦶": true, + "⊖": true, + "𝕠": true, + "⦷": true, + "⦹": true, + "⊕": true, + "∨": true, + "↻": true, + "⩝": true, + "ℴ": true, + "ℴ": true, + "ª": true, + "ª": true, + "º": true, + "º": true, + "⊶": true, + "⩖": true, + "⩗": true, + "⩛": true, + "ℴ": true, + "ø": true, + "ø": true, + "⊘": true, + "õ": true, + "õ": true, + "⊗": true, + "⨶": true, + "ö": true, + "ö": true, + "⌽": true, + "∥": true, + "¶": true, + "¶": true, + "∥": true, + "⫳": true, + "⫽": true, + "∂": true, + "п": true, + "%": true, + ".": true, + "‰": true, + "⊥": true, + "‱": true, + "𝔭": true, + "φ": true, + "ϕ": true, + "ℳ": true, + "☎": true, + "π": true, + "⋔": true, + "ϖ": true, + "ℏ": true, + "ℎ": true, + "ℏ": true, + "+": true, + "⨣": true, + "⊞": true, + "⨢": true, + "∔": true, + "⨥": true, + "⩲": true, + "±": true, + "±": true, + "⨦": true, + "⨧": true, + "±": true, + "⨕": true, + "𝕡": true, + "£": true, + "£": true, + "≺": true, + "⪳": true, + "⪷": true, + "≼": true, + "⪯": true, + "≺": true, + "⪷": true, + "≼": true, + "⪯": true, + "⪹": true, + "⪵": true, + "⋨": true, + "≾": true, + "′": true, + "ℙ": true, + "⪵": true, + "⪹": true, + "⋨": true, + "∏": true, + "⌮": true, + "⌒": true, + "⌓": true, + "∝": true, + "∝": true, + "≾": true, + "⊰": true, + "𝓅": true, + "ψ": true, + " ": true, + "𝔮": true, + "⨌": true, + "𝕢": true, + "⁗": true, + "𝓆": true, + "ℍ": true, + "⨖": true, + "?": true, + "≟": true, + """: true, + """: true, + "⇛": true, + "⇒": true, + "⤜": true, + "⤏": true, + "⥤": true, + "∽̱": true, + "ŕ": true, + "√": true, + "⦳": true, + "⟩": true, + "⦒": true, + "⦥": true, + "⟩": true, + "»": true, + "»": true, + "→": true, + "⥵": true, + "⇥": true, + "⤠": true, + "⤳": true, + "⤞": true, + "↪": true, + "↬": true, + "⥅": true, + "⥴": true, + "↣": true, + "↝": true, + "⤚": true, + "∶": true, + "ℚ": true, + "⤍": true, + "❳": true, + "}": true, + "]": true, + "⦌": true, + "⦎": true, + "⦐": true, + "ř": true, + "ŗ": true, + "⌉": true, + "}": true, + "р": true, + "⤷": true, + "⥩": true, + "”": true, + "”": true, + "↳": true, + "ℜ": true, + "ℛ": true, + "ℜ": true, + "ℝ": true, + "▭": true, + "®": true, + "®": true, + "⥽": true, + "⌋": true, + "𝔯": true, + "⇁": true, + "⇀": true, + "⥬": true, + "ρ": true, + "ϱ": true, + "→": true, + "↣": true, + "⇁": true, + "⇀": true, + "⇄": true, + "⇌": true, + "⇉": true, + "↝": true, + "⋌": true, + "˚": true, + "≓": true, + "⇄": true, + "⇌": true, + "‏": true, + "⎱": true, + "⎱": true, + "⫮": true, + "⟭": true, + "⇾": true, + "⟧": true, + "⦆": true, + "𝕣": true, + "⨮": true, + "⨵": true, + ")": true, + "⦔": true, + "⨒": true, + "⇉": true, + "›": true, + "𝓇": true, + "↱": true, + "]": true, + "’": true, + "’": true, + "⋌": true, + "⋊": true, + "▹": true, + "⊵": true, + "▸": true, + "⧎": true, + "⥨": true, + "℞": true, + "ś": true, + "‚": true, + "≻": true, + "⪴": true, + "⪸": true, + "š": true, + "≽": true, + "⪰": true, + "ş": true, + "ŝ": true, + "⪶": true, + "⪺": true, + "⋩": true, + "⨓": true, + "≿": true, + "с": true, + "⋅": true, + 
"⊡": true, + "⩦": true, + "⇘": true, + "⤥": true, + "↘": true, + "↘": true, + "§": true, + "§": true, + ";": true, + "⤩": true, + "∖": true, + "∖": true, + "✶": true, + "𝔰": true, + "⌢": true, + "♯": true, + "щ": true, + "ш": true, + "∣": true, + "∥": true, + "­": true, + "­": true, + "σ": true, + "ς": true, + "ς": true, + "∼": true, + "⩪": true, + "≃": true, + "≃": true, + "⪞": true, + "⪠": true, + "⪝": true, + "⪟": true, + "≆": true, + "⨤": true, + "⥲": true, + "←": true, + "∖": true, + "⨳": true, + "⧤": true, + "∣": true, + "⌣": true, + "⪪": true, + "⪬": true, + "⪬︀": true, + "ь": true, + "/": true, + "⧄": true, + "⌿": true, + "𝕤": true, + "♠": true, + "♠": true, + "∥": true, + "⊓": true, + "⊓︀": true, + "⊔": true, + "⊔︀": true, + "⊏": true, + "⊑": true, + "⊏": true, + "⊑": true, + "⊐": true, + "⊒": true, + "⊐": true, + "⊒": true, + "□": true, + "□": true, + "▪": true, + "▪": true, + "→": true, + "𝓈": true, + "∖": true, + "⌣": true, + "⋆": true, + "☆": true, + "★": true, + "ϵ": true, + "ϕ": true, + "¯": true, + "⊂": true, + "⫅": true, + "⪽": true, + "⊆": true, + "⫃": true, + "⫁": true, + "⫋": true, + "⊊": true, + "⪿": true, + "⥹": true, + "⊂": true, + "⊆": true, + "⫅": true, + "⊊": true, + "⫋": true, + "⫇": true, + "⫕": true, + "⫓": true, + "≻": true, + "⪸": true, + "≽": true, + "⪰": true, + "⪺": true, + "⪶": true, + "⋩": true, + "≿": true, + "∑": true, + "♪": true, + "¹": true, + "¹": true, + "²": true, + "²": true, + "³": true, + "³": true, + "⊃": true, + "⫆": true, + "⪾": true, + "⫘": true, + "⊇": true, + "⫄": true, + "⟉": true, + "⫗": true, + "⥻": true, + "⫂": true, + "⫌": true, + "⊋": true, + "⫀": true, + "⊃": true, + "⊇": true, + "⫆": true, + "⊋": true, + "⫌": true, + "⫈": true, + "⫔": true, + "⫖": true, + "⇙": true, + "⤦": true, + "↙": true, + "↙": true, + "⤪": true, + "ß": true, + "ß": true, + "⌖": true, + "τ": true, + "⎴": true, + "ť": true, + "ţ": true, + "т": true, + "⃛": true, + "⌕": true, + "𝔱": true, + "∴": true, + "∴": true, + "θ": true, + "ϑ": true, + "ϑ": true, + "≈": true, + "∼": true, + " ": true, + "≈": true, + "∼": true, + "þ": true, + "þ": true, + "˜": true, + "×": true, + "×": true, + "⊠": true, + "⨱": true, + "⨰": true, + "∭": true, + "⤨": true, + "⊤": true, + "⌶": true, + "⫱": true, + "𝕥": true, + "⫚": true, + "⤩": true, + "‴": true, + "™": true, + "▵": true, + "▿": true, + "◃": true, + "⊴": true, + "≜": true, + "▹": true, + "⊵": true, + "◬": true, + "≜": true, + "⨺": true, + "⨹": true, + "⧍": true, + "⨻": true, + "⏢": true, + "𝓉": true, + "ц": true, + "ћ": true, + "ŧ": true, + "≬": true, + "↞": true, + "↠": true, + "⇑": true, + "⥣": true, + "ú": true, + "ú": true, + "↑": true, + "ў": true, + "ŭ": true, + "û": true, + "û": true, + "у": true, + "⇅": true, + "ű": true, + "⥮": true, + "⥾": true, + "𝔲": true, + "ù": true, + "ù": true, + "↿": true, + "↾": true, + "▀": true, + "⌜": true, + "⌜": true, + "⌏": true, + "◸": true, + "ū": true, + "¨": true, + "¨": true, + "ų": true, + "𝕦": true, + "↑": true, + "↕": true, + "↿": true, + "↾": true, + "⊎": true, + "υ": true, + "ϒ": true, + "υ": true, + "⇈": true, + "⌝": true, + "⌝": true, + "⌎": true, + "ů": true, + "◹": true, + "𝓊": true, + "⋰": true, + "ũ": true, + "▵": true, + "▴": true, + "⇈": true, + "ü": true, + "ü": true, + "⦧": true, + "⇕": true, + "⫨": true, + "⫩": true, + "⊨": true, + "⦜": true, + "ϵ": true, + "ϰ": true, + "∅": true, + "ϕ": true, + "ϖ": true, + "∝": true, + "↕": true, + "ϱ": true, + "ς": true, + "⊊︀": true, + "⫋︀": true, + "⊋︀": true, + "⫌︀": true, + "ϑ": true, + "⊲": true, + "⊳": true, + "в": true, 
+ "⊢": true, + "∨": true, + "⊻": true, + "≚": true, + "⋮": true, + "|": true, + "|": true, + "𝔳": true, + "⊲": true, + "⊂⃒": true, + "⊃⃒": true, + "𝕧": true, + "∝": true, + "⊳": true, + "𝓋": true, + "⫋︀": true, + "⊊︀": true, + "⫌︀": true, + "⊋︀": true, + "⦚": true, + "ŵ": true, + "⩟": true, + "∧": true, + "≙": true, + "℘": true, + "𝔴": true, + "𝕨": true, + "℘": true, + "≀": true, + "≀": true, + "𝓌": true, + "⋂": true, + "◯": true, + "⋃": true, + "▽": true, + "𝔵": true, + "⟺": true, + "⟷": true, + "ξ": true, + "⟸": true, + "⟵": true, + "⟼": true, + "⋻": true, + "⨀": true, + "𝕩": true, + "⨁": true, + "⨂": true, + "⟹": true, + "⟶": true, + "𝓍": true, + "⨆": true, + "⨄": true, + "△": true, + "⋁": true, + "⋀": true, + "ý": true, + "ý": true, + "я": true, + "ŷ": true, + "ы": true, + "¥": true, + "¥": true, + "𝔶": true, + "ї": true, + "𝕪": true, + "𝓎": true, + "ю": true, + "ÿ": true, + "ÿ": true, + "ź": true, + "ž": true, + "з": true, + "ż": true, + "ℨ": true, + "ζ": true, + "𝔷": true, + "ж": true, + "⇝": true, + "𝕫": true, + "𝓏": true, + "‍": true, + "‌": true, +} diff --git a/vendor/github.com/russross/blackfriday/v2/esc.go b/vendor/github.com/russross/blackfriday/v2/esc.go new file mode 100644 index 000000000..6ab60102c --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/esc.go @@ -0,0 +1,70 @@ +package blackfriday + +import ( + "html" + "io" +) + +var htmlEscaper = [256][]byte{ + '&': []byte("&"), + '<': []byte("<"), + '>': []byte(">"), + '"': []byte("""), +} + +func escapeHTML(w io.Writer, s []byte) { + escapeEntities(w, s, false) +} + +func escapeAllHTML(w io.Writer, s []byte) { + escapeEntities(w, s, true) +} + +func escapeEntities(w io.Writer, s []byte, escapeValidEntities bool) { + var start, end int + for end < len(s) { + escSeq := htmlEscaper[s[end]] + if escSeq != nil { + isEntity, entityEnd := nodeIsEntity(s, end) + if isEntity && !escapeValidEntities { + w.Write(s[start : entityEnd+1]) + start = entityEnd + 1 + } else { + w.Write(s[start:end]) + w.Write(escSeq) + start = end + 1 + } + } + end++ + } + if start < len(s) && end <= len(s) { + w.Write(s[start:end]) + } +} + +func nodeIsEntity(s []byte, end int) (isEntity bool, endEntityPos int) { + isEntity = false + endEntityPos = end + 1 + + if s[end] == '&' { + for endEntityPos < len(s) { + if s[endEntityPos] == ';' { + if entities[string(s[end:endEntityPos+1])] { + isEntity = true + break + } + } + if !isalnum(s[endEntityPos]) && s[endEntityPos] != '&' && s[endEntityPos] != '#' { + break + } + endEntityPos++ + } + } + + return isEntity, endEntityPos +} + +func escLink(w io.Writer, text []byte) { + unesc := html.UnescapeString(string(text)) + escapeHTML(w, []byte(unesc)) +} diff --git a/vendor/github.com/russross/blackfriday/v2/go.mod b/vendor/github.com/russross/blackfriday/v2/go.mod new file mode 100644 index 000000000..620b74e0a --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/go.mod @@ -0,0 +1 @@ +module github.com/russross/blackfriday/v2 diff --git a/vendor/github.com/russross/blackfriday/v2/html.go b/vendor/github.com/russross/blackfriday/v2/html.go new file mode 100644 index 000000000..cb4f26e30 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/html.go @@ -0,0 +1,952 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. 
+//
+
+//
+//
+// HTML rendering backend
+//
+//
+
+package blackfriday
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"regexp"
+	"strings"
+)
+
+// HTMLFlags control optional behavior of HTML renderer.
+type HTMLFlags int
+
+// HTML renderer configuration options.
+const (
+	HTMLFlagsNone           HTMLFlags = 0
+	SkipHTML                HTMLFlags = 1 << iota // Skip preformatted HTML blocks
+	SkipImages                                    // Skip embedded images
+	SkipLinks                                     // Skip all links
+	Safelink                                      // Only link to trusted protocols
+	NofollowLinks                                 // Only link with rel="nofollow"
+	NoreferrerLinks                               // Only link with rel="noreferrer"
+	NoopenerLinks                                 // Only link with rel="noopener"
+	HrefTargetBlank                               // Add a blank target
+	CompletePage                                  // Generate a complete HTML page
+	UseXHTML                                      // Generate XHTML output instead of HTML
+	FootnoteReturnLinks                           // Generate a link at the end of a footnote to return to the source
+	Smartypants                                   // Enable smart punctuation substitutions
+	SmartypantsFractions                          // Enable smart fractions (with Smartypants)
+	SmartypantsDashes                             // Enable smart dashes (with Smartypants)
+	SmartypantsLatexDashes                        // Enable LaTeX-style dashes (with Smartypants)
+	SmartypantsAngledQuotes                       // Enable angled double quotes (with Smartypants) for double quotes rendering
+	SmartypantsQuotesNBSP                         // Enable « French guillemets » (with Smartypants)
+	TOC                                           // Generate a table of contents
+)
+
+var (
+	htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag)
+)
+
+const (
+	htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" +
+		processingInstruction + "|" + declaration + "|" + cdata + ")"
+	closeTag              = "</" + tagName + "\\s*[>]"
+	openTag               = "<" + tagName + attribute + "*" + "\\s*/?>"
+	attribute             = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)"
+	attributeValue        = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")"
+	attributeValueSpec    = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")"
+	attributeName         = "[a-zA-Z_:][a-zA-Z0-9:._-]*"
+	cdata                 = "<!\\[CDATA\\[[\\s\\S]*?\\]\\]>"
+	declaration           = "<![A-Z]+" + "\\s+[^>]*>"
+	doubleQuotedValue     = "\"[^\"]*\""
+	htmlComment           = "<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->"
+	processingInstruction = "[<][?].*?[?][>]"
+	singleQuotedValue     = "'[^']*'"
+	tagName               = "[A-Za-z][A-Za-z0-9-]*"
+	unquotedValue         = "[^\"'=<>`\\x00-\\x20]+"
+)
+
+// HTMLRendererParameters is a collection of supplementary parameters tweaking
+// the behavior of various parts of HTML renderer.
+type HTMLRendererParameters struct {
+	// Prepend this text to each relative URL.
+	AbsolutePrefix string
+	// Add this text to each footnote anchor, to ensure uniqueness.
+	FootnoteAnchorPrefix string
+	// Show this text inside the <a> tag for a footnote return link, if the
+	// HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
+	// <sup>[return]</sup> is used.
+	FootnoteReturnLinkContents string
+	// If set, add this text to the front of each Heading ID, to ensure
+	// uniqueness.
+	HeadingIDPrefix string
+	// If set, add this text to the back of each Heading ID, to ensure uniqueness.
+	HeadingIDSuffix string
+	// Increase heading levels: if the offset is 1, <h1> becomes <h2>
etc. + // Negative offset is also valid. + // Resulting levels are clipped between 1 and 6. + HeadingLevelOffset int + + Title string // Document title (used if CompletePage is set) + CSS string // Optional CSS file URL (used if CompletePage is set) + Icon string // Optional icon file URL (used if CompletePage is set) + + Flags HTMLFlags // Flags allow customizing this renderer's behavior +} + +// HTMLRenderer is a type that implements the Renderer interface for HTML output. +// +// Do not create this directly, instead use the NewHTMLRenderer function. +type HTMLRenderer struct { + HTMLRendererParameters + + closeTag string // how to end singleton tags: either " />" or ">" + + // Track heading IDs to prevent ID collision in a single generation. + headingIDs map[string]int + + lastOutputLen int + disableTags int + + sr *SPRenderer +} + +const ( + xhtmlClose = " />" + htmlClose = ">" +) + +// NewHTMLRenderer creates and configures an HTMLRenderer object, which +// satisfies the Renderer interface. +func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer { + // configure the rendering engine + closeTag := htmlClose + if params.Flags&UseXHTML != 0 { + closeTag = xhtmlClose + } + + if params.FootnoteReturnLinkContents == "" { + // U+FE0E is VARIATION SELECTOR-15. + // It suppresses automatic emoji presentation of the preceding + // U+21A9 LEFTWARDS ARROW WITH HOOK on iOS and iPadOS. + params.FootnoteReturnLinkContents = "↩\ufe0e" + } + + return &HTMLRenderer{ + HTMLRendererParameters: params, + + closeTag: closeTag, + headingIDs: make(map[string]int), + + sr: NewSmartypantsRenderer(params.Flags), + } +} + +func isHTMLTag(tag []byte, tagname string) bool { + found, _ := findHTMLTagPos(tag, tagname) + return found +} + +// Look for a character, but ignore it when it's in any kind of quotes, it +// might be JavaScript +func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int { + inSingleQuote := false + inDoubleQuote := false + inGraveQuote := false + i := start + for i < len(html) { + switch { + case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote: + return i + case html[i] == '\'': + inSingleQuote = !inSingleQuote + case html[i] == '"': + inDoubleQuote = !inDoubleQuote + case html[i] == '`': + inGraveQuote = !inGraveQuote + } + i++ + } + return start +} + +func findHTMLTagPos(tag []byte, tagname string) (bool, int) { + i := 0 + if i < len(tag) && tag[0] != '<' { + return false, -1 + } + i++ + i = skipSpace(tag, i) + + if i < len(tag) && tag[i] == '/' { + i++ + } + + i = skipSpace(tag, i) + j := 0 + for ; i < len(tag); i, j = i+1, j+1 { + if j >= len(tagname) { + break + } + + if strings.ToLower(string(tag[i]))[0] != tagname[j] { + return false, -1 + } + } + + if i == len(tag) { + return false, -1 + } + + rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>') + if rightAngle >= i { + return true, rightAngle + } + + return false, -1 +} + +func skipSpace(tag []byte, i int) int { + for i < len(tag) && isspace(tag[i]) { + i++ + } + return i +} + +func isRelativeLink(link []byte) (yes bool) { + // a tag begin with '#' + if link[0] == '#' { + return true + } + + // link begin with '/' but not '//', the second maybe a protocol relative link + if len(link) >= 2 && link[0] == '/' && link[1] != '/' { + return true + } + + // only the root '/' + if len(link) == 1 && link[0] == '/' { + return true + } + + // current directory : begin with "./" + if bytes.HasPrefix(link, []byte("./")) { + return true + } + + // parent directory : begin with "../" + if 
bytes.HasPrefix(link, []byte("../")) {
+		return true
+	}
+
+	return false
+}
+
+func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string {
+	for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
+		tmp := fmt.Sprintf("%s-%d", id, count+1)
+
+		if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
+			r.headingIDs[id] = count + 1
+			id = tmp
+		} else {
+			id = id + "-1"
+		}
+	}
+
+	if _, found := r.headingIDs[id]; !found {
+		r.headingIDs[id] = 0
+	}
+
+	return id
+}
+
+func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte {
+	if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
+		newDest := r.AbsolutePrefix
+		if link[0] != '/' {
+			newDest += "/"
+		}
+		newDest += string(link)
+		return []byte(newDest)
+	}
+	return link
+}
+
+func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string {
+	if isRelativeLink(link) {
+		return attrs
+	}
+	val := []string{}
+	if flags&NofollowLinks != 0 {
+		val = append(val, "nofollow")
+	}
+	if flags&NoreferrerLinks != 0 {
+		val = append(val, "noreferrer")
+	}
+	if flags&NoopenerLinks != 0 {
+		val = append(val, "noopener")
+	}
+	if flags&HrefTargetBlank != 0 {
+		attrs = append(attrs, "target=\"_blank\"")
+	}
+	if len(val) == 0 {
+		return attrs
+	}
+	attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
+	return append(attrs, attr)
+}
+
+func isMailto(link []byte) bool {
+	return bytes.HasPrefix(link, []byte("mailto:"))
+}
+
+func needSkipLink(flags HTMLFlags, dest []byte) bool {
+	if flags&SkipLinks != 0 {
+		return true
+	}
+	return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
+}
+
+func isSmartypantable(node *Node) bool {
+	pt := node.Parent.Type
+	return pt != Link && pt != CodeBlock && pt != Code
+}
+
+func appendLanguageAttr(attrs []string, info []byte) []string {
+	if len(info) == 0 {
+		return attrs
+	}
+	endOfLang := bytes.IndexAny(info, "\t ")
+	if endOfLang < 0 {
+		endOfLang = len(info)
+	}
+	return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang]))
+}
+
+func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) {
+	w.Write(name)
+	if len(attrs) > 0 {
+		w.Write(spaceBytes)
+		w.Write([]byte(strings.Join(attrs, " ")))
+	}
+	w.Write(gtBytes)
+	r.lastOutputLen = 1
+}
+
+func footnoteRef(prefix string, node *Node) []byte {
+	urlFrag := prefix + string(slugify(node.Destination))
+	anchor := fmt.Sprintf(`<a href="#fn:%s">%d</a>`, urlFrag, node.NoteID)
+	return []byte(fmt.Sprintf(`<sup class="footnote-ref" id="fnref:%s">%s</sup>`, urlFrag, anchor))
+}
+
+func footnoteItem(prefix string, slug []byte) []byte {
+	return []byte(fmt.Sprintf(`<li id="%sfn:%s">`, prefix, slug))
+}
+
+func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
+	const format = ` <a class="footnote-return" href="#%sfnref:%s">%s</a>`
+	return []byte(fmt.Sprintf(format, prefix, slug, returnLink))
+}
+
+func itemOpenCR(node *Node) bool {
+	if node.Prev == nil {
+		return false
+	}
+	ld := node.Parent.ListData
+	return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0
+}
+
+func skipParagraphTags(node *Node) bool {
+	grandparent := node.Parent.Parent
+	if grandparent == nil || grandparent.Type != List {
+		return false
+	}
+	tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0
+	return grandparent.Type == List && tightOrTerm
+}
+
+func cellAlignment(align CellAlignFlags) string {
+	switch align {
+	case TableAlignmentLeft:
+		return "left"
+	case TableAlignmentRight:
+		return "right"
+	case TableAlignmentCenter:
+		return "center"
+	default:
+		return ""
+	}
+}
+
+func (r *HTMLRenderer) out(w io.Writer, text []byte) {
+	if r.disableTags > 0 {
+		w.Write(htmlTagRe.ReplaceAll(text, []byte{}))
+	} else {
+		w.Write(text)
+	}
+	r.lastOutputLen = len(text)
+}
+
+func (r *HTMLRenderer) cr(w io.Writer) {
+	if r.lastOutputLen > 0 {
+		r.out(w, nlBytes)
+	}
+}
+
+var (
+	nlBytes    = []byte{'\n'}
+	gtBytes    = []byte{'>'}
+	spaceBytes = []byte{' '}
+)
+
+var (
+	brTag              = []byte("<br>")
    ") + brXHTMLTag = []byte("
    ") + emTag = []byte("") + emCloseTag = []byte("") + strongTag = []byte("") + strongCloseTag = []byte("") + delTag = []byte("") + delCloseTag = []byte("") + ttTag = []byte("") + ttCloseTag = []byte("") + aTag = []byte("") + preTag = []byte("
    ")
    +	preCloseTag        = []byte("
    ") + codeTag = []byte("") + codeCloseTag = []byte("") + pTag = []byte("

    ") + pCloseTag = []byte("

    ") + blockquoteTag = []byte("
    ") + blockquoteCloseTag = []byte("
    ") + hrTag = []byte("
    ") + hrXHTMLTag = []byte("
    ") + ulTag = []byte("
      ") + ulCloseTag = []byte("
    ") + olTag = []byte("
      ") + olCloseTag = []byte("
    ") + dlTag = []byte("
    ") + dlCloseTag = []byte("
    ") + liTag = []byte("
  • ") + liCloseTag = []byte("
  • ") + ddTag = []byte("
    ") + ddCloseTag = []byte("
    ") + dtTag = []byte("
    ") + dtCloseTag = []byte("
    ") + tableTag = []byte("") + tableCloseTag = []byte("
    ") + tdTag = []byte("") + thTag = []byte("") + theadTag = []byte("") + theadCloseTag = []byte("") + tbodyTag = []byte("") + tbodyCloseTag = []byte("") + trTag = []byte("") + trCloseTag = []byte("") + h1Tag = []byte("") + h2Tag = []byte("") + h3Tag = []byte("") + h4Tag = []byte("") + h5Tag = []byte("") + h6Tag = []byte("") + + footnotesDivBytes = []byte("\n
    \n\n") + footnotesCloseDivBytes = []byte("\n
    \n") +) + +func headingTagsFromLevel(level int) ([]byte, []byte) { + if level <= 1 { + return h1Tag, h1CloseTag + } + switch level { + case 2: + return h2Tag, h2CloseTag + case 3: + return h3Tag, h3CloseTag + case 4: + return h4Tag, h4CloseTag + case 5: + return h5Tag, h5CloseTag + } + return h6Tag, h6CloseTag +} + +func (r *HTMLRenderer) outHRTag(w io.Writer) { + if r.Flags&UseXHTML == 0 { + r.out(w, hrTag) + } else { + r.out(w, hrXHTMLTag) + } +} + +// RenderNode is a default renderer of a single node of a syntax tree. For +// block nodes it will be called twice: first time with entering=true, second +// time with entering=false, so that it could know when it's working on an open +// tag and when on close. It writes the result to w. +// +// The return value is a way to tell the calling walker to adjust its walk +// pattern: e.g. it can terminate the traversal by returning Terminate. Or it +// can ask the walker to skip a subtree of this node by returning SkipChildren. +// The typical behavior is to return GoToNext, which asks for the usual +// traversal to the next node. +func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus { + attrs := []string{} + switch node.Type { + case Text: + if r.Flags&Smartypants != 0 { + var tmp bytes.Buffer + escapeHTML(&tmp, node.Literal) + r.sr.Process(w, tmp.Bytes()) + } else { + if node.Parent.Type == Link { + escLink(w, node.Literal) + } else { + escapeHTML(w, node.Literal) + } + } + case Softbreak: + r.cr(w) + // TODO: make it configurable via out(renderer.softbreak) + case Hardbreak: + if r.Flags&UseXHTML == 0 { + r.out(w, brTag) + } else { + r.out(w, brXHTMLTag) + } + r.cr(w) + case Emph: + if entering { + r.out(w, emTag) + } else { + r.out(w, emCloseTag) + } + case Strong: + if entering { + r.out(w, strongTag) + } else { + r.out(w, strongCloseTag) + } + case Del: + if entering { + r.out(w, delTag) + } else { + r.out(w, delCloseTag) + } + case HTMLSpan: + if r.Flags&SkipHTML != 0 { + break + } + r.out(w, node.Literal) + case Link: + // mark it but don't link it if it is not a safe link: no smartypants + dest := node.LinkData.Destination + if needSkipLink(r.Flags, dest) { + if entering { + r.out(w, ttTag) + } else { + r.out(w, ttCloseTag) + } + } else { + if entering { + dest = r.addAbsPrefix(dest) + var hrefBuf bytes.Buffer + hrefBuf.WriteString("href=\"") + escLink(&hrefBuf, dest) + hrefBuf.WriteByte('"') + attrs = append(attrs, hrefBuf.String()) + if node.NoteID != 0 { + r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node)) + break + } + attrs = appendLinkAttrs(attrs, r.Flags, dest) + if len(node.LinkData.Title) > 0 { + var titleBuff bytes.Buffer + titleBuff.WriteString("title=\"") + escapeHTML(&titleBuff, node.LinkData.Title) + titleBuff.WriteByte('"') + attrs = append(attrs, titleBuff.String()) + } + r.tag(w, aTag, attrs) + } else { + if node.NoteID != 0 { + break + } + r.out(w, aCloseTag) + } + } + case Image: + if r.Flags&SkipImages != 0 { + return SkipChildren + } + if entering { + dest := node.LinkData.Destination + dest = r.addAbsPrefix(dest) + if r.disableTags == 0 { + //if options.safe && potentiallyUnsafe(dest) { + //out(w, ``)
+				//} else {
+				r.out(w, []byte(`<img src=`)) + } + } + case Code: + r.out(w, codeTag) + escapeAllHTML(w, node.Literal) + r.out(w, codeCloseTag) + case Document: + break + case Paragraph: + if skipParagraphTags(node) { + break + } + if entering { + // TODO: untangle this clusterfuck about when the newlines need + // to be added and when not. + if node.Prev != nil { + switch node.Prev.Type { + case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule: + r.cr(w) + } + } + if node.Parent.Type == BlockQuote && node.Prev == nil { + r.cr(w) + } + r.out(w, pTag) + } else { + r.out(w, pCloseTag) + if !(node.Parent.Type == Item && node.Next == nil) { + r.cr(w) + } + } + case BlockQuote: + if entering { + r.cr(w) + r.out(w, blockquoteTag) + } else { + r.out(w, blockquoteCloseTag) + r.cr(w) + } + case HTMLBlock: + if r.Flags&SkipHTML != 0 { + break + } + r.cr(w) + r.out(w, node.Literal) + r.cr(w) + case Heading: + headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level + openTag, closeTag := headingTagsFromLevel(headingLevel) + if entering { + if node.IsTitleblock { + attrs = append(attrs, `class="title"`) + } + if node.HeadingID != "" { + id := r.ensureUniqueHeadingID(node.HeadingID) + if r.HeadingIDPrefix != "" { + id = r.HeadingIDPrefix + id + } + if r.HeadingIDSuffix != "" { + id = id + r.HeadingIDSuffix + } + attrs = append(attrs, fmt.Sprintf(`id="%s"`, id)) + } + r.cr(w) + r.tag(w, openTag, attrs) + } else { + r.out(w, closeTag) + if !(node.Parent.Type == Item && node.Next == nil) { + r.cr(w) + } + } + case HorizontalRule: + r.cr(w) + r.outHRTag(w) + r.cr(w) + case List: + openTag := ulTag + closeTag := ulCloseTag + if node.ListFlags&ListTypeOrdered != 0 { + openTag = olTag + closeTag = olCloseTag + } + if node.ListFlags&ListTypeDefinition != 0 { + openTag = dlTag + closeTag = dlCloseTag + } + if entering { + if node.IsFootnotesList { + r.out(w, footnotesDivBytes) + r.outHRTag(w) + r.cr(w) + } + r.cr(w) + if node.Parent.Type == Item && node.Parent.Parent.Tight { + r.cr(w) + } + r.tag(w, openTag[:len(openTag)-1], attrs) + r.cr(w) + } else { + r.out(w, closeTag) + //cr(w) + //if node.parent.Type != Item { + // cr(w) + //} + if node.Parent.Type == Item && node.Next != nil { + r.cr(w) + } + if node.Parent.Type == Document || node.Parent.Type == BlockQuote { + r.cr(w) + } + if node.IsFootnotesList { + r.out(w, footnotesCloseDivBytes) + } + } + case Item: + openTag := liTag + closeTag := liCloseTag + if node.ListFlags&ListTypeDefinition != 0 { + openTag = ddTag + closeTag = ddCloseTag + } + if node.ListFlags&ListTypeTerm != 0 { + openTag = dtTag + closeTag = dtCloseTag + } + if entering { + if itemOpenCR(node) { + r.cr(w) + } + if node.ListData.RefLink != nil { + slug := slugify(node.ListData.RefLink) + r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug)) + break + } + r.out(w, openTag) + } else { + if node.ListData.RefLink != nil { + slug := slugify(node.ListData.RefLink) + if r.Flags&FootnoteReturnLinks != 0 { + r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug)) + } + } + r.out(w, closeTag) + r.cr(w) + } + case CodeBlock: + attrs = appendLanguageAttr(attrs, node.Info) + r.cr(w) + r.out(w, preTag) + r.tag(w, codeTag[:len(codeTag)-1], attrs) + escapeAllHTML(w, node.Literal) + r.out(w, codeCloseTag) + r.out(w, preCloseTag) + if node.Parent.Type != Item { + r.cr(w) + } + case Table: + if entering { + r.cr(w) + r.out(w, tableTag) + } else { + r.out(w, tableCloseTag) + r.cr(w) + } + case TableCell: + openTag := tdTag + closeTag := 
tdCloseTag + if node.IsHeader { + openTag = thTag + closeTag = thCloseTag + } + if entering { + align := cellAlignment(node.Align) + if align != "" { + attrs = append(attrs, fmt.Sprintf(`align="%s"`, align)) + } + if node.Prev == nil { + r.cr(w) + } + r.tag(w, openTag, attrs) + } else { + r.out(w, closeTag) + r.cr(w) + } + case TableHead: + if entering { + r.cr(w) + r.out(w, theadTag) + } else { + r.out(w, theadCloseTag) + r.cr(w) + } + case TableBody: + if entering { + r.cr(w) + r.out(w, tbodyTag) + // XXX: this is to adhere to a rather silly test. Should fix test. + if node.FirstChild == nil { + r.cr(w) + } + } else { + r.out(w, tbodyCloseTag) + r.cr(w) + } + case TableRow: + if entering { + r.cr(w) + r.out(w, trTag) + } else { + r.out(w, trCloseTag) + r.cr(w) + } + default: + panic("Unknown node type " + node.Type.String()) + } + return GoToNext +} + +// RenderHeader writes HTML document preamble and TOC if requested. +func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) { + r.writeDocumentHeader(w) + if r.Flags&TOC != 0 { + r.writeTOC(w, ast) + } +} + +// RenderFooter writes HTML document footer. +func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) { + if r.Flags&CompletePage == 0 { + return + } + io.WriteString(w, "\n\n\n") +} + +func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) { + if r.Flags&CompletePage == 0 { + return + } + ending := "" + if r.Flags&UseXHTML != 0 { + io.WriteString(w, "\n") + io.WriteString(w, "\n") + ending = " /" + } else { + io.WriteString(w, "\n") + io.WriteString(w, "\n") + } + io.WriteString(w, "\n") + io.WriteString(w, " ") + if r.Flags&Smartypants != 0 { + r.sr.Process(w, []byte(r.Title)) + } else { + escapeHTML(w, []byte(r.Title)) + } + io.WriteString(w, "\n") + io.WriteString(w, " \n") + io.WriteString(w, " \n") + if r.CSS != "" { + io.WriteString(w, " \n") + } + if r.Icon != "" { + io.WriteString(w, " \n") + } + io.WriteString(w, "\n") + io.WriteString(w, "\n\n") +} + +func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) { + buf := bytes.Buffer{} + + inHeading := false + tocLevel := 0 + headingCount := 0 + + ast.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Heading && !node.HeadingData.IsTitleblock { + inHeading = entering + if entering { + node.HeadingID = fmt.Sprintf("toc_%d", headingCount) + if node.Level == tocLevel { + buf.WriteString("\n\n
  • ") + } else if node.Level < tocLevel { + for node.Level < tocLevel { + tocLevel-- + buf.WriteString("
  • \n") + } + buf.WriteString("\n\n
  • ") + } else { + for node.Level > tocLevel { + tocLevel++ + buf.WriteString("\n") + } + + if buf.Len() > 0 { + io.WriteString(w, "\n") + } + r.lastOutputLen = buf.Len() +} diff --git a/vendor/github.com/russross/blackfriday/v2/inline.go b/vendor/github.com/russross/blackfriday/v2/inline.go new file mode 100644 index 000000000..d45bd9417 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/inline.go @@ -0,0 +1,1228 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse inline elements. +// + +package blackfriday + +import ( + "bytes" + "regexp" + "strconv" +) + +var ( + urlRe = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+` + anchorRe = regexp.MustCompile(`^(]+")?\s?>` + urlRe + `<\/a>)`) + + // https://www.w3.org/TR/html5/syntax.html#character-references + // highest unicode code point in 17 planes (2^20): 1,114,112d = + // 7 dec digits or 6 hex digits + // named entity references can be 2-31 characters with stuff like < + // at one end and ∳ at the other. There + // are also sometimes numbers at the end, although this isn't inherent + // in the specification; there are never numbers anywhere else in + // current character references, though; see ¾ and ▒, etc. + // https://www.w3.org/TR/html5/syntax.html#named-character-references + // + // entity := "&" (named group | number ref) ";" + // named group := [a-zA-Z]{2,31}[0-9]{0,2} + // number ref := "#" (dec ref | hex ref) + // dec ref := [0-9]{1,7} + // hex ref := ("x" | "X") [0-9a-fA-F]{1,6} + htmlEntityRe = regexp.MustCompile(`&([a-zA-Z]{2,31}[0-9]{0,2}|#([0-9]{1,7}|[xX][0-9a-fA-F]{1,6}));`) +) + +// Functions to parse text within a block +// Each function returns the number of chars taken care of +// data is the complete block being rendered +// offset is the number of valid chars before the current cursor + +func (p *Markdown) inline(currBlock *Node, data []byte) { + // handlers might call us recursively: enforce a maximum depth + if p.nesting >= p.maxNesting || len(data) == 0 { + return + } + p.nesting++ + beg, end := 0, 0 + for end < len(data) { + handler := p.inlineCallback[data[end]] + if handler != nil { + if consumed, node := handler(p, data, end); consumed == 0 { + // No action from the callback. + end++ + } else { + // Copy inactive chars into the output. + currBlock.AppendChild(text(data[beg:end])) + if node != nil { + currBlock.AppendChild(node) + } + // Skip past whatever the callback used. 
+ beg = end + consumed + end = beg + } + } else { + end++ + } + } + if beg < len(data) { + if data[end-1] == '\n' { + end-- + } + currBlock.AppendChild(text(data[beg:end])) + } + p.nesting-- +} + +// single and double emphasis parsing +func emphasis(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + c := data[0] + + if len(data) > 2 && data[1] != c { + // whitespace cannot follow an opening emphasis; + // strikethrough only takes two characters '~~' + if c == '~' || isspace(data[1]) { + return 0, nil + } + ret, node := helperEmphasis(p, data[1:], c) + if ret == 0 { + return 0, nil + } + + return ret + 1, node + } + + if len(data) > 3 && data[1] == c && data[2] != c { + if isspace(data[2]) { + return 0, nil + } + ret, node := helperDoubleEmphasis(p, data[2:], c) + if ret == 0 { + return 0, nil + } + + return ret + 2, node + } + + if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c { + if c == '~' || isspace(data[3]) { + return 0, nil + } + ret, node := helperTripleEmphasis(p, data, 3, c) + if ret == 0 { + return 0, nil + } + + return ret + 3, node + } + + return 0, nil +} + +func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + nb := 0 + + // count the number of backticks in the delimiter + for nb < len(data) && data[nb] == '`' { + nb++ + } + + // find the next delimiter + i, end := 0, 0 + for end = nb; end < len(data) && i < nb; end++ { + if data[end] == '`' { + i++ + } else { + i = 0 + } + } + + // no matching delimiter? + if i < nb && end >= len(data) { + return 0, nil + } + + // trim outside whitespace + fBegin := nb + for fBegin < end && data[fBegin] == ' ' { + fBegin++ + } + + fEnd := end - nb + for fEnd > fBegin && data[fEnd-1] == ' ' { + fEnd-- + } + + // render the code span + if fBegin != fEnd { + code := NewNode(Code) + code.Literal = data[fBegin:fEnd] + return end, code + } + + return end, nil +} + +// newline preceded by two spaces becomes
    +func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + origOffset := offset + for offset < len(data) && data[offset] == ' ' { + offset++ + } + + if offset < len(data) && data[offset] == '\n' { + if offset-origOffset >= 2 { + return offset - origOffset + 1, NewNode(Hardbreak) + } + return offset - origOffset, nil + } + return 0, nil +} + +// newline without two spaces works when HardLineBreak is enabled +func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + if p.extensions&HardLineBreak != 0 { + return 1, NewNode(Hardbreak) + } + return 0, nil +} + +type linkType int + +const ( + linkNormal linkType = iota + linkImg + linkDeferredFootnote + linkInlineFootnote +) + +func isReferenceStyleLink(data []byte, pos int, t linkType) bool { + if t == linkDeferredFootnote { + return false + } + return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^' +} + +func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +// '[': parse a link or an image or a footnote +func link(p *Markdown, data []byte, offset int) (int, *Node) { + // no links allowed inside regular links, footnote, and deferred footnotes + if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') { + return 0, nil + } + + var t linkType + switch { + // special case: ![^text] == deferred footnote (that follows something with + // an exclamation point) + case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^': + t = linkDeferredFootnote + // ![alt] == image + case offset >= 0 && data[offset] == '!': + t = linkImg + offset++ + // ^[text] == inline footnote + // [^refId] == deferred footnote + case p.extensions&Footnotes != 0: + if offset >= 0 && data[offset] == '^' { + t = linkInlineFootnote + offset++ + } else if len(data)-1 > offset && data[offset+1] == '^' { + t = linkDeferredFootnote + } + // [text] == regular link + default: + t = linkNormal + } + + data = data[offset:] + + var ( + i = 1 + noteID int + title, link, altContent []byte + textHasNl = false + ) + + if t == linkDeferredFootnote { + i++ + } + + // look for the matching closing bracket + for level := 1; level > 0 && i < len(data); i++ { + switch { + case data[i] == '\n': + textHasNl = true + + case isBackslashEscaped(data, i): + continue + + case data[i] == '[': + level++ + + case data[i] == ']': + level-- + if level <= 0 { + i-- // compensate for extra i++ in for loop + } + } + } + + if i >= len(data) { + return 0, nil + } + + txtE := i + i++ + var footnoteNode *Node + + // skip any amount of whitespace or newline + // (this is much more lax than original markdown syntax) + for i < len(data) && isspace(data[i]) { + i++ + } + + // inline style link + switch { + case i < len(data) && data[i] == '(': + // skip initial whitespace + i++ + + for i < len(data) && isspace(data[i]) { + i++ + } + + linkB := i + + // look for link end: ' " ) + findlinkend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')' || data[i] == '\'' || data[i] == '"': + break findlinkend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + linkE := i + + // look for title end if present + titleB, titleE := 0, 0 + if data[i] == '\'' 
|| data[i] == '"' { + i++ + titleB = i + + findtitleend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')': + break findtitleend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + + // skip whitespace after title + titleE = i - 1 + for titleE > titleB && isspace(data[titleE]) { + titleE-- + } + + // check for closing quote presence + if data[titleE] != '\'' && data[titleE] != '"' { + titleB, titleE = 0, 0 + linkE = i + } + } + + // remove whitespace at the end of the link + for linkE > linkB && isspace(data[linkE-1]) { + linkE-- + } + + // remove optional angle brackets around the link + if data[linkB] == '<' { + linkB++ + } + if data[linkE-1] == '>' { + linkE-- + } + + // build escaped link and title + if linkE > linkB { + link = data[linkB:linkE] + } + + if titleE > titleB { + title = data[titleB:titleE] + } + + i++ + + // reference style link + case isReferenceStyleLink(data, i, t): + var id []byte + altContentConsidered := false + + // look for the id + i++ + linkB := i + for i < len(data) && data[i] != ']' { + i++ + } + if i >= len(data) { + return 0, nil + } + linkE := i + + // find the reference + if linkB == linkE { + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + id = data[1:txtE] + altContentConsidered = true + } + } else { + id = data[linkB:linkE] + } + + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + // keep link and title from reference + link = lr.link + title = lr.title + if altContentConsidered { + altContent = lr.text + } + i++ + + // shortcut reference style link or reference or inline footnote + default: + var id []byte + + // craft the id + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + if t == linkDeferredFootnote { + id = data[2:txtE] // get rid of the ^ + } else { + id = data[1:txtE] + } + } + + footnoteNode = NewNode(Item) + if t == linkInlineFootnote { + // create a new reference + noteID = len(p.notes) + 1 + + var fragment []byte + if len(id) > 0 { + if len(id) < 16 { + fragment = make([]byte, len(id)) + } else { + fragment = make([]byte, 16) + } + copy(fragment, slugify(id)) + } else { + fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...) 
+ } + + ref := &reference{ + noteID: noteID, + hasBlock: false, + link: fragment, + title: id, + footnote: footnoteNode, + } + + p.notes = append(p.notes, ref) + + link = ref.link + title = ref.title + } else { + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + if t == linkDeferredFootnote { + lr.noteID = len(p.notes) + 1 + lr.footnote = footnoteNode + p.notes = append(p.notes, lr) + } + + // keep link and title from reference + link = lr.link + // if inline footnote, title == footnote contents + title = lr.title + noteID = lr.noteID + } + + // rewind the whitespace + i = txtE + 1 + } + + var uLink []byte + if t == linkNormal || t == linkImg { + if len(link) > 0 { + var uLinkBuf bytes.Buffer + unescapeText(&uLinkBuf, link) + uLink = uLinkBuf.Bytes() + } + + // links need something to click on and somewhere to go + if len(uLink) == 0 || (t == linkNormal && txtE <= 1) { + return 0, nil + } + } + + // call the relevant rendering function + var linkNode *Node + switch t { + case linkNormal: + linkNode = NewNode(Link) + linkNode.Destination = normalizeURI(uLink) + linkNode.Title = title + if len(altContent) > 0 { + linkNode.AppendChild(text(altContent)) + } else { + // links cannot contain other links, so turn off link parsing + // temporarily and recurse + insideLink := p.insideLink + p.insideLink = true + p.inline(linkNode, data[1:txtE]) + p.insideLink = insideLink + } + + case linkImg: + linkNode = NewNode(Image) + linkNode.Destination = uLink + linkNode.Title = title + linkNode.AppendChild(text(data[1:txtE])) + i++ + + case linkInlineFootnote, linkDeferredFootnote: + linkNode = NewNode(Link) + linkNode.Destination = link + linkNode.Title = title + linkNode.NoteID = noteID + linkNode.Footnote = footnoteNode + if t == linkInlineFootnote { + i++ + } + + default: + return 0, nil + } + + return i, linkNode +} + +func (p *Markdown) inlineHTMLComment(data []byte) int { + if len(data) < 5 { + return 0 + } + if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' { + return 0 + } + i := 5 + // scan for an end-of-comment marker, across lines if necessary + for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { + i++ + } + // no end-of-comment marker + if i >= len(data) { + return 0 + } + return i + 1 +} + +func stripMailto(link []byte) []byte { + if bytes.HasPrefix(link, []byte("mailto://")) { + return link[9:] + } else if bytes.HasPrefix(link, []byte("mailto:")) { + return link[7:] + } else { + return link + } +} + +// autolinkType specifies a kind of autolink that gets detected. +type autolinkType int + +// These are the possible flag values for the autolink renderer. +const ( + notAutolink autolinkType = iota + normalAutolink + emailAutolink +) + +// '<' when tags or autolinks are allowed +func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + altype, end := tagLength(data) + if size := p.inlineHTMLComment(data); size > 0 { + end = size + } + if end > 2 { + if altype != notAutolink { + var uLink bytes.Buffer + unescapeText(&uLink, data[1:end+1-2]) + if uLink.Len() > 0 { + link := uLink.Bytes() + node := NewNode(Link) + node.Destination = link + if altype == emailAutolink { + node.Destination = append([]byte("mailto:"), link...) 
+ } + node.AppendChild(text(stripMailto(link))) + return end, node + } + } else { + htmlTag := NewNode(HTMLSpan) + htmlTag.Literal = data[:end] + return end, htmlTag + } + } + + return end, nil +} + +// '\\' backslash escape +var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~") + +func escape(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + if len(data) > 1 { + if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' { + return 2, NewNode(Hardbreak) + } + if bytes.IndexByte(escapeChars, data[1]) < 0 { + return 0, nil + } + + return 2, text(data[1:2]) + } + + return 2, nil +} + +func unescapeText(ob *bytes.Buffer, src []byte) { + i := 0 + for i < len(src) { + org := i + for i < len(src) && src[i] != '\\' { + i++ + } + + if i > org { + ob.Write(src[org:i]) + } + + if i+1 >= len(src) { + break + } + + ob.WriteByte(src[i+1]) + i += 2 + } +} + +// '&' escaped when it doesn't belong to an entity +// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+; +func entity(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + end := 1 + + if end < len(data) && data[end] == '#' { + end++ + } + + for end < len(data) && isalnum(data[end]) { + end++ + } + + if end < len(data) && data[end] == ';' { + end++ // real entity + } else { + return 0, nil // lone '&' + } + + ent := data[:end] + // undo & escaping or it will be converted to &amp; by another + // escaper in the renderer + if bytes.Equal(ent, []byte("&")) { + ent = []byte{'&'} + } + + return end, text(ent) +} + +func linkEndsWithEntity(data []byte, linkEnd int) bool { + entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1) + return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd +} + +// hasPrefixCaseInsensitive is a custom implementation of +// strings.HasPrefix(strings.ToLower(s), prefix) +// we rolled our own because ToLower pulls in a huge machinery of lowercasing +// anything from Unicode and that's very slow. Since this func will only be +// used on ASCII protocol prefixes, we can take shortcuts. 
+func hasPrefixCaseInsensitive(s, prefix []byte) bool { + if len(s) < len(prefix) { + return false + } + delta := byte('a' - 'A') + for i, b := range prefix { + if b != s[i] && b != s[i]+delta { + return false + } + } + return true +} + +var protocolPrefixes = [][]byte{ + []byte("http://"), + []byte("https://"), + []byte("ftp://"), + []byte("file://"), + []byte("mailto:"), +} + +const shortestPrefix = 6 // len("ftp://"), the shortest of the above + +func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // quick check to rule out most false hits + if p.insideLink || len(data) < offset+shortestPrefix { + return 0, nil + } + for _, prefix := range protocolPrefixes { + endOfHead := offset + 8 // 8 is the len() of the longest prefix + if endOfHead > len(data) { + endOfHead = len(data) + } + if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) { + return autoLink(p, data, offset) + } + } + return 0, nil +} + +func autoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // Now a more expensive check to see if we're not inside an anchor element + anchorStart := offset + offsetFromAnchor := 0 + for anchorStart > 0 && data[anchorStart] != '<' { + anchorStart-- + offsetFromAnchor++ + } + + anchorStr := anchorRe.Find(data[anchorStart:]) + if anchorStr != nil { + anchorClose := NewNode(HTMLSpan) + anchorClose.Literal = anchorStr[offsetFromAnchor:] + return len(anchorStr) - offsetFromAnchor, anchorClose + } + + // scan backward for a word boundary + rewind := 0 + for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) { + rewind++ + } + if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters + return 0, nil + } + + origData := data + data = data[offset-rewind:] + + if !isSafeLink(data) { + return 0, nil + } + + linkEnd := 0 + for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) { + linkEnd++ + } + + // Skip punctuation at the end of the link + if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' { + linkEnd-- + } + + // But don't skip semicolon if it's a part of escaped entity: + if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) { + linkEnd-- + } + + // See if the link finishes with a punctuation sign that can be closed. + var copen byte + switch data[linkEnd-1] { + case '"': + copen = '"' + case '\'': + copen = '\'' + case ')': + copen = '(' + case ']': + copen = '[' + case '}': + copen = '{' + default: + copen = 0 + } + + if copen != 0 { + bufEnd := offset - rewind + linkEnd - 2 + + openDelim := 1 + + /* Try to close the final punctuation sign in this same line; + * if we managed to close it outside of the URL, that means that it's + * not part of the URL. If it closes inside the URL, that means it + * is part of the URL. 
+ * + * Examples: + * + * foo http://www.pokemon.com/Pikachu_(Electric) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo (http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric)) + * + * (foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => foo http://www.pokemon.com/Pikachu_(Electric) + */ + + for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 { + if origData[bufEnd] == data[linkEnd-1] { + openDelim++ + } + + if origData[bufEnd] == copen { + openDelim-- + } + + bufEnd-- + } + + if openDelim == 0 { + linkEnd-- + } + } + + var uLink bytes.Buffer + unescapeText(&uLink, data[:linkEnd]) + + if uLink.Len() > 0 { + node := NewNode(Link) + node.Destination = uLink.Bytes() + node.AppendChild(text(uLink.Bytes())) + return linkEnd, node + } + + return linkEnd, nil +} + +func isEndOfLink(char byte) bool { + return isspace(char) || char == '<' +} + +var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")} +var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} + +func isSafeLink(link []byte) bool { + for _, path := range validPaths { + if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) { + if len(link) == len(path) { + return true + } else if isalnum(link[len(path)]) { + return true + } + } + } + + for _, prefix := range validUris { + // TODO: handle unicode here + // case-insensitive prefix test + if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) { + return true + } + } + + return false +} + +// return the length of the given tag, or 0 is it's not valid +func tagLength(data []byte) (autolink autolinkType, end int) { + var i, j int + + // a valid tag can't be shorter than 3 chars + if len(data) < 3 { + return notAutolink, 0 + } + + // begins with a '<' optionally followed by '/', followed by letter or number + if data[0] != '<' { + return notAutolink, 0 + } + if data[1] == '/' { + i = 2 + } else { + i = 1 + } + + if !isalnum(data[i]) { + return notAutolink, 0 + } + + // scheme test + autolink = notAutolink + + // try to find the beginning of an URI + for i < len(data) && (isalnum(data[i]) || data[i] == '.' 
|| data[i] == '+' || data[i] == '-') { + i++ + } + + if i > 1 && i < len(data) && data[i] == '@' { + if j = isMailtoAutoLink(data[i:]); j != 0 { + return emailAutolink, i + j + } + } + + if i > 2 && i < len(data) && data[i] == ':' { + autolink = normalAutolink + i++ + } + + // complete autolink test: no whitespace or ' or " + switch { + case i >= len(data): + autolink = notAutolink + case autolink != notAutolink: + j = i + + for i < len(data) { + if data[i] == '\\' { + i += 2 + } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) { + break + } else { + i++ + } + + } + + if i >= len(data) { + return autolink, 0 + } + if i > j && data[i] == '>' { + return autolink, i + 1 + } + + // one of the forbidden chars has been found + autolink = notAutolink + } + i += bytes.IndexByte(data[i:], '>') + if i < 0 { + return autolink, 0 + } + return autolink, i + 1 +} + +// look for the address part of a mail autolink and '>' +// this is less strict than the original markdown e-mail address matching +func isMailtoAutoLink(data []byte) int { + nb := 0 + + // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@' + for i := 0; i < len(data); i++ { + if isalnum(data[i]) { + continue + } + + switch data[i] { + case '@': + nb++ + + case '-', '.', '_': + break + + case '>': + if nb == 1 { + return i + 1 + } + return 0 + default: + return 0 + } + } + + return 0 +} + +// look for the next emph char, skipping other constructs +func helperFindEmphChar(data []byte, c byte) int { + i := 0 + + for i < len(data) { + for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' { + i++ + } + if i >= len(data) { + return 0 + } + // do not count escaped chars + if i != 0 && data[i-1] == '\\' { + i++ + continue + } + if data[i] == c { + return i + } + + if data[i] == '`' { + // skip a code span + tmpI := 0 + i++ + for i < len(data) && data[i] != '`' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } else if data[i] == '[' { + // skip a link + tmpI := 0 + i++ + for i < len(data) && data[i] != ']' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\n') { + i++ + } + if i >= len(data) { + return tmpI + } + if data[i] != '[' && data[i] != '(' { // not a link + if tmpI > 0 { + return tmpI + } + continue + } + cc := data[i] + i++ + for i < len(data) && data[i] != cc { + if tmpI == 0 && data[i] == c { + return i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } + } + return 0 +} + +func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + // skip one symbol if coming from emph3 + if len(data) > 1 && data[0] == c && data[1] == c { + i = 1 + } + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + if i >= len(data) { + return 0, nil + } + + if i+1 < len(data) && data[i+1] == c { + i++ + continue + } + + if data[i] == c && !isspace(data[i-1]) { + + if p.extensions&NoIntraEmphasis != 0 { + if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) { + continue + } + } + + emph := NewNode(Emph) + p.inline(emph, data[:i]) + return i + 1, emph + } + } + + return 0, nil +} + +func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && 
!isspace(data[i-1]) { + nodeType := Strong + if c == '~' { + nodeType = Del + } + node := NewNode(nodeType) + p.inline(node, data[:i]) + return i + 2, node + } + i++ + } + return 0, nil +} + +func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) { + i := 0 + origData := data + data = data[offset:] + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + // skip whitespace preceded symbols + if data[i] != c || isspace(data[i-1]) { + continue + } + + switch { + case i+2 < len(data) && data[i+1] == c && data[i+2] == c: + // triple symbol found + strong := NewNode(Strong) + em := NewNode(Emph) + strong.AppendChild(em) + p.inline(em, data[:i]) + return i + 3, strong + case (i+1 < len(data) && data[i+1] == c): + // double symbol found, hand over to emph1 + length, node := helperEmphasis(p, origData[offset-2:], c) + if length == 0 { + return 0, nil + } + return length - 2, node + default: + // single symbol found, hand over to emph2 + length, node := helperDoubleEmphasis(p, origData[offset-1:], c) + if length == 0 { + return 0, nil + } + return length - 1, node + } + } + return 0, nil +} + +func text(s []byte) *Node { + node := NewNode(Text) + node.Literal = s + return node +} + +func normalizeURI(s []byte) []byte { + return s // TODO: implement +} diff --git a/vendor/github.com/russross/blackfriday/v2/markdown.go b/vendor/github.com/russross/blackfriday/v2/markdown.go new file mode 100644 index 000000000..58d2e4538 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/markdown.go @@ -0,0 +1,950 @@ +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. + +package blackfriday + +import ( + "bytes" + "fmt" + "io" + "strings" + "unicode/utf8" +) + +// +// Markdown parsing and processing +// + +// Version string of the package. Appears in the rendered document when +// CompletePage flag is on. +const Version = "2.0" + +// Extensions is a bitwise or'ed collection of enabled Blackfriday's +// extensions. +type Extensions int + +// These are the supported markdown parsing extensions. +// OR these values together to select multiple extensions. 
+const ( + NoExtensions Extensions = 0 + NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words + Tables // Render tables + FencedCode // Render fenced code blocks + Autolink // Detect embedded URLs that are not explicitly marked + Strikethrough // Strikethrough text using ~~test~~ + LaxHTMLBlocks // Loosen up HTML block parsing rules + SpaceHeadings // Be strict about prefix heading rules + HardLineBreak // Translate newlines into line breaks + TabSizeEight // Expand tabs to eight spaces instead of four + Footnotes // Pandoc-style footnotes + NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block + HeadingIDs // specify heading IDs with {#id} + Titleblock // Titleblock ala pandoc + AutoHeadingIDs // Create the heading ID from the text + BackslashLineBreak // Translate trailing backslashes into line breaks + DefinitionLists // Render definition lists + + CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants | + SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes + + CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | + Autolink | Strikethrough | SpaceHeadings | HeadingIDs | + BackslashLineBreak | DefinitionLists +) + +// ListType contains bitwise or'ed flags for list and list item objects. +type ListType int + +// These are the possible flag values for the ListItem renderer. +// Multiple flag values may be ORed together. +// These are mostly of interest if you are writing a new output format. +const ( + ListTypeOrdered ListType = 1 << iota + ListTypeDefinition + ListTypeTerm + + ListItemContainsBlock + ListItemBeginningOfList // TODO: figure out if this is of any use now + ListItemEndOfList +) + +// CellAlignFlags holds a type of alignment in a table cell. +type CellAlignFlags int + +// These are the possible flag values for the table cell renderer. +// Only a single one of these values will be used; they are not ORed together. +// These are mostly of interest if you are writing a new output format. +const ( + TableAlignmentLeft CellAlignFlags = 1 << iota + TableAlignmentRight + TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight) +) + +// The size of a tab stop. +const ( + TabSizeDefault = 4 + TabSizeDouble = 8 +) + +// blockTags is a set of tags that are recognized as HTML block tags. +// Any of these can be included in markdown text without special escaping. +var blockTags = map[string]struct{}{ + "blockquote": {}, + "del": {}, + "div": {}, + "dl": {}, + "fieldset": {}, + "form": {}, + "h1": {}, + "h2": {}, + "h3": {}, + "h4": {}, + "h5": {}, + "h6": {}, + "iframe": {}, + "ins": {}, + "math": {}, + "noscript": {}, + "ol": {}, + "pre": {}, + "p": {}, + "script": {}, + "style": {}, + "table": {}, + "ul": {}, + + // HTML5 + "address": {}, + "article": {}, + "aside": {}, + "canvas": {}, + "figcaption": {}, + "figure": {}, + "footer": {}, + "header": {}, + "hgroup": {}, + "main": {}, + "nav": {}, + "output": {}, + "progress": {}, + "section": {}, + "video": {}, +} + +// Renderer is the rendering interface. This is mostly of interest if you are +// implementing a new rendering format. +// +// Only an HTML implementation is provided in this repository, see the README +// for external implementations. +type Renderer interface { + // RenderNode is the main rendering method. It will be called once for + // every leaf node and twice for every non-leaf node (first with + // entering=true, then with entering=false). 
The method should write its + // rendition of the node to the supplied writer w. + RenderNode(w io.Writer, node *Node, entering bool) WalkStatus + + // RenderHeader is a method that allows the renderer to produce some + // content preceding the main body of the output document. The header is + // understood in the broad sense here. For example, the default HTML + // renderer will write not only the HTML document preamble, but also the + // table of contents if it was requested. + // + // The method will be passed an entire document tree, in case a particular + // implementation needs to inspect it to produce output. + // + // The output should be written to the supplied writer w. If your + // implementation has no header to write, supply an empty implementation. + RenderHeader(w io.Writer, ast *Node) + + // RenderFooter is a symmetric counterpart of RenderHeader. + RenderFooter(w io.Writer, ast *Node) +} + +// Callback functions for inline parsing. One such function is defined +// for each character that triggers a response when parsing inline data. +type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node) + +// Markdown is a type that holds extensions and the runtime state used by +// Parse, and the renderer. You can not use it directly, construct it with New. +type Markdown struct { + renderer Renderer + referenceOverride ReferenceOverrideFunc + refs map[string]*reference + inlineCallback [256]inlineParser + extensions Extensions + nesting int + maxNesting int + insideLink bool + + // Footnotes need to be ordered as well as available to quickly check for + // presence. If a ref is also a footnote, it's stored both in refs and here + // in notes. Slice is nil if footnotes not enabled. + notes []*reference + + doc *Node + tip *Node // = doc + oldTip *Node + lastMatchedContainer *Node // = doc + allClosed bool +} + +func (p *Markdown) getRef(refid string) (ref *reference, found bool) { + if p.referenceOverride != nil { + r, overridden := p.referenceOverride(refid) + if overridden { + if r == nil { + return nil, false + } + return &reference{ + link: []byte(r.Link), + title: []byte(r.Title), + noteID: 0, + hasBlock: false, + text: []byte(r.Text)}, true + } + } + // refs are case insensitive + ref, found = p.refs[strings.ToLower(refid)] + return ref, found +} + +func (p *Markdown) finalize(block *Node) { + above := block.Parent + block.open = false + p.tip = above +} + +func (p *Markdown) addChild(node NodeType, offset uint32) *Node { + return p.addExistingChild(NewNode(node), offset) +} + +func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node { + for !p.tip.canContain(node.Type) { + p.finalize(p.tip) + } + p.tip.AppendChild(node) + p.tip = node + return node +} + +func (p *Markdown) closeUnmatchedBlocks() { + if !p.allClosed { + for p.oldTip != p.lastMatchedContainer { + parent := p.oldTip.Parent + p.finalize(p.oldTip) + p.oldTip = parent + } + p.allClosed = true + } +} + +// +// +// Public interface +// +// + +// Reference represents the details of a link. +// See the documentation in Options for more details on use-case. +type Reference struct { + // Link is usually the URL the reference points to. + Link string + // Title is the alternate text describing the link in more detail. 
+ Title string + // Text is the optional text to override the ref with if the syntax used was + // [refid][] + Text string +} + +// ReferenceOverrideFunc is expected to be called with a reference string and +// return either a valid Reference type that the reference string maps to or +// nil. If overridden is false, the default reference logic will be executed. +// See the documentation in Options for more details on use-case. +type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) + +// New constructs a Markdown processor. You can use the same With* functions as +// for Run() to customize parser's behavior and the renderer. +func New(opts ...Option) *Markdown { + var p Markdown + for _, opt := range opts { + opt(&p) + } + p.refs = make(map[string]*reference) + p.maxNesting = 16 + p.insideLink = false + docNode := NewNode(Document) + p.doc = docNode + p.tip = docNode + p.oldTip = docNode + p.lastMatchedContainer = docNode + p.allClosed = true + // register inline parsers + p.inlineCallback[' '] = maybeLineBreak + p.inlineCallback['*'] = emphasis + p.inlineCallback['_'] = emphasis + if p.extensions&Strikethrough != 0 { + p.inlineCallback['~'] = emphasis + } + p.inlineCallback['`'] = codeSpan + p.inlineCallback['\n'] = lineBreak + p.inlineCallback['['] = link + p.inlineCallback['<'] = leftAngle + p.inlineCallback['\\'] = escape + p.inlineCallback['&'] = entity + p.inlineCallback['!'] = maybeImage + p.inlineCallback['^'] = maybeInlineFootnote + if p.extensions&Autolink != 0 { + p.inlineCallback['h'] = maybeAutoLink + p.inlineCallback['m'] = maybeAutoLink + p.inlineCallback['f'] = maybeAutoLink + p.inlineCallback['H'] = maybeAutoLink + p.inlineCallback['M'] = maybeAutoLink + p.inlineCallback['F'] = maybeAutoLink + } + if p.extensions&Footnotes != 0 { + p.notes = make([]*reference, 0) + } + return &p +} + +// Option customizes the Markdown processor's default behavior. +type Option func(*Markdown) + +// WithRenderer allows you to override the default renderer. +func WithRenderer(r Renderer) Option { + return func(p *Markdown) { + p.renderer = r + } +} + +// WithExtensions allows you to pick some of the many extensions provided by +// Blackfriday. You can bitwise OR them. +func WithExtensions(e Extensions) Option { + return func(p *Markdown) { + p.extensions = e + } +} + +// WithNoExtensions turns off all extensions and custom behavior. +func WithNoExtensions() Option { + return func(p *Markdown) { + p.extensions = NoExtensions + p.renderer = NewHTMLRenderer(HTMLRendererParameters{ + Flags: HTMLFlagsNone, + }) + } +} + +// WithRefOverride sets an optional function callback that is called every +// time a reference is resolved. +// +// In Markdown, the link reference syntax can be made to resolve a link to +// a reference instead of an inline URL, in one of the following ways: +// +// * [link text][refid] +// * [refid][] +// +// Usually, the refid is defined at the bottom of the Markdown document. If +// this override function is provided, the refid is passed to the override +// function first, before consulting the defined refids at the bottom. If +// the override function indicates an override did not occur, the refids at +// the bottom will be used to fill in the link details. +func WithRefOverride(o ReferenceOverrideFunc) Option { + return func(p *Markdown) { + p.referenceOverride = o + } +} + +// Run is the main entry point to Blackfriday. It parses and renders a +// block of markdown-encoded text. 
+// +// The simplest invocation of Run takes one argument, input: +// output := Run(input) +// This will parse the input with CommonExtensions enabled and render it with +// the default HTMLRenderer (with CommonHTMLFlags). +// +// Variadic arguments opts can customize the default behavior. Since Markdown +// type does not contain exported fields, you can not use it directly. Instead, +// use the With* functions. For example, this will call the most basic +// functionality, with no extensions: +// output := Run(input, WithNoExtensions()) +// +// You can use any number of With* arguments, even contradicting ones. They +// will be applied in order of appearance and the latter will override the +// former: +// output := Run(input, WithNoExtensions(), WithExtensions(exts), +// WithRenderer(yourRenderer)) +func Run(input []byte, opts ...Option) []byte { + r := NewHTMLRenderer(HTMLRendererParameters{ + Flags: CommonHTMLFlags, + }) + optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)} + optList = append(optList, opts...) + parser := New(optList...) + ast := parser.Parse(input) + var buf bytes.Buffer + parser.renderer.RenderHeader(&buf, ast) + ast.Walk(func(node *Node, entering bool) WalkStatus { + return parser.renderer.RenderNode(&buf, node, entering) + }) + parser.renderer.RenderFooter(&buf, ast) + return buf.Bytes() +} + +// Parse is an entry point to the parsing part of Blackfriday. It takes an +// input markdown document and produces a syntax tree for its contents. This +// tree can then be rendered with a default or custom renderer, or +// analyzed/transformed by the caller to whatever non-standard needs they have. +// The return value is the root node of the syntax tree. +func (p *Markdown) Parse(input []byte) *Node { + p.block(input) + // Walk the tree and finish up some of unfinished blocks + for p.tip != nil { + p.finalize(p.tip) + } + // Walk the tree again and process inline markdown in each block + p.doc.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) + p.parseRefsToAST() + return p.doc +} + +func (p *Markdown) parseRefsToAST() { + if p.extensions&Footnotes == 0 || len(p.notes) == 0 { + return + } + p.tip = p.doc + block := p.addBlock(List, nil) + block.IsFootnotesList = true + block.ListFlags = ListTypeOrdered + flags := ListItemBeginningOfList + // Note: this loop is intentionally explicit, not range-form. This is + // because the body of the loop will append nested footnotes to p.notes and + // we need to process those late additions. Range form would only walk over + // the fixed initial set. + for i := 0; i < len(p.notes); i++ { + ref := p.notes[i] + p.addExistingChild(ref.footnote, 0) + block := ref.footnote + block.ListFlags = flags | ListTypeOrdered + block.RefLink = ref.link + if ref.hasBlock { + flags |= ListItemContainsBlock + p.block(ref.title) + } else { + p.inline(block, ref.title) + } + flags &^= ListItemBeginningOfList | ListItemContainsBlock + } + above := block.Parent + finalizeList(block) + p.tip = above + block.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) +} + +// +// Link references +// +// This section implements support for references that (usually) appear +// as footnotes in a document, and can be referenced anywhere in the document. 
+// The basic format is: +// +// [1]: http://www.google.com/ "Google" +// [2]: http://www.github.com/ "Github" +// +// Anywhere in the document, the reference can be linked by referring to its +// label, i.e., 1 and 2 in this example, as in: +// +// This library is hosted on [Github][2], a git hosting site. +// +// Actual footnotes as specified in Pandoc and supported by some other Markdown +// libraries such as php-markdown are also taken care of. They look like this: +// +// This sentence needs a bit of further explanation.[^note] +// +// [^note]: This is the explanation. +// +// Footnotes should be placed at the end of the document in an ordered list. +// Finally, there are inline footnotes such as: +// +// Inline footnotes^[Also supported.] provide a quick inline explanation, +// but are rendered at the bottom of the document. +// + +// reference holds all information necessary for a reference-style links or +// footnotes. +// +// Consider this markdown with reference-style links: +// +// [link][ref] +// +// [ref]: /url/ "tooltip title" +// +// It will be ultimately converted to this HTML: +// +//
+//	<p><a href="/url/" title="tooltip title">link</a></p>
    +// +// And a reference structure will be populated as follows: +// +// p.refs["ref"] = &reference{ +// link: "/url/", +// title: "tooltip title", +// } +// +// Alternatively, reference can contain information about a footnote. Consider +// this markdown: +// +// Text needing a footnote.[^a] +// +// [^a]: This is the note +// +// A reference structure will be populated as follows: +// +// p.refs["a"] = &reference{ +// link: "a", +// title: "This is the note", +// noteID: , +// } +// +// TODO: As you can see, it begs for splitting into two dedicated structures +// for refs and for footnotes. +type reference struct { + link []byte + title []byte + noteID int // 0 if not a footnote ref + hasBlock bool + footnote *Node // a link to the Item node within a list of footnotes + + text []byte // only gets populated by refOverride feature with Reference.Text +} + +func (r *reference) String() string { + return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}", + r.link, r.title, r.text, r.noteID, r.hasBlock) +} + +// Check whether or not data starts with a reference link. +// If so, it is parsed and stored in the list of references +// (in the render struct). +// Returns the number of bytes to skip to move past it, +// or zero if the first line is not a reference. +func isReference(p *Markdown, data []byte, tabSize int) int { + // up to 3 optional leading spaces + if len(data) < 4 { + return 0 + } + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + + noteID := 0 + + // id part: anything but a newline between brackets + if data[i] != '[' { + return 0 + } + i++ + if p.extensions&Footnotes != 0 { + if i < len(data) && data[i] == '^' { + // we can set it to anything here because the proper noteIds will + // be assigned later during the second pass. It just has to be != 0 + noteID = 1 + i++ + } + } + idOffset := i + for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' { + i++ + } + if i >= len(data) || data[i] != ']' { + return 0 + } + idEnd := i + // footnotes can have empty ID, like this: [^], but a reference can not be + // empty like this: []. Break early if it's not a footnote and there's no ID + if noteID == 0 && idOffset == idEnd { + return 0 + } + // spacer: colon (space | tab)* newline? 
(space | tab)* + i++ + if i >= len(data) || data[i] != ':' { + return 0 + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && (data[i] == '\n' || data[i] == '\r') { + i++ + if i < len(data) && data[i] == '\n' && data[i-1] == '\r' { + i++ + } + } + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i >= len(data) { + return 0 + } + + var ( + linkOffset, linkEnd int + titleOffset, titleEnd int + lineEnd int + raw []byte + hasBlock bool + ) + + if p.extensions&Footnotes != 0 && noteID != 0 { + linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) + lineEnd = linkEnd + } else { + linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i) + } + if lineEnd == 0 { + return 0 + } + + // a valid ref has been found + + ref := &reference{ + noteID: noteID, + hasBlock: hasBlock, + } + + if noteID > 0 { + // reusing the link field for the id since footnotes don't have links + ref.link = data[idOffset:idEnd] + // if footnote, it's not really a title, it's the contained text + ref.title = raw + } else { + ref.link = data[linkOffset:linkEnd] + ref.title = data[titleOffset:titleEnd] + } + + // id matches are case-insensitive + id := string(bytes.ToLower(data[idOffset:idEnd])) + + p.refs[id] = ref + + return lineEnd +} + +func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { + // link: whitespace-free sequence, optionally between angle brackets + if data[i] == '<' { + i++ + } + linkOffset = i + for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { + i++ + } + linkEnd = i + if data[linkOffset] == '<' && data[linkEnd-1] == '>' { + linkOffset++ + linkEnd-- + } + + // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' ) + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' { + return + } + + // compute end-of-line + if i >= len(data) || data[i] == '\r' || data[i] == '\n' { + lineEnd = i + } + if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' { + lineEnd++ + } + + // optional (space|tab)* spacer after a newline + if lineEnd > 0 { + i = lineEnd + 1 + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + } + + // optional title: any non-newline sequence enclosed in '"() alone on its line + if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') { + i++ + titleOffset = i + + // look for EOL + for i < len(data) && data[i] != '\n' && data[i] != '\r' { + i++ + } + if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' { + titleEnd = i + 1 + } else { + titleEnd = i + } + + // step back + i-- + for i > titleOffset && (data[i] == ' ' || data[i] == '\t') { + i-- + } + if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') { + lineEnd = titleEnd + titleEnd = i + } + } + + return +} + +// The first bit of this logic is the same as Parser.listItem, but the rest +// is much simpler. This function simply finds the entire block and shifts it +// over by one tab if it is indeed a block (just returns the line if it's not). +// blockEnd is the end of the section in the input buffer, and contents is the +// extracted text that was shifted over one tab. It will need to be rendered at +// the end of the document. 
+func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { + if i == 0 || len(data) == 0 { + return + } + + // skip leading whitespace on first line + for i < len(data) && data[i] == ' ' { + i++ + } + + blockStart = i + + // find the end of the line + blockEnd = i + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[blockEnd:i]) + blockEnd = i + + // process the following lines + containsBlankLine := false + +gatherLines: + for blockEnd < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[blockEnd:i]) > 0 { + containsBlankLine = true + blockEnd = i + continue + } + + n := 0 + if n = isIndented(data[blockEnd:i], indentSize); n == 0 { + // this is the end of the block. + // we don't want to include this last line in the index. + break gatherLines + } + + // if there were blank lines before this one, insert a new one now + if containsBlankLine { + raw.WriteByte('\n') + containsBlankLine = false + } + + // get rid of that first tab, write to buffer + raw.Write(data[blockEnd+n : i]) + hasBlock = true + + blockEnd = i + } + + if data[blockEnd-1] != '\n' { + raw.WriteByte('\n') + } + + contents = raw.Bytes() + + return +} + +// +// +// Miscellaneous helper functions +// +// + +// Test if a character is a punctuation symbol. +// Taken from a private function in regexp in the stdlib. +func ispunct(c byte) bool { + for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { + if c == r { + return true + } + } + return false +} + +// Test if a character is a whitespace character. +func isspace(c byte) bool { + return ishorizontalspace(c) || isverticalspace(c) +} + +// Test if a character is a horizontal whitespace character. +func ishorizontalspace(c byte) bool { + return c == ' ' || c == '\t' +} + +// Test if a character is a vertical character. +func isverticalspace(c byte) bool { + return c == '\n' || c == '\r' || c == '\f' || c == '\v' +} + +// Test if a character is letter. +func isletter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// Test if a character is a letter or a digit. +// TODO: check when this is looking for ASCII alnum and when it should use unicode +func isalnum(c byte) bool { + return (c >= '0' && c <= '9') || isletter(c) +} + +// Replace tab characters with spaces, aligning to the next TAB_SIZE column. 
+// always ends output with a newline +func expandTabs(out *bytes.Buffer, line []byte, tabSize int) { + // first, check for common cases: no tabs, or only tabs at beginning of line + i, prefix := 0, 0 + slowcase := false + for i = 0; i < len(line); i++ { + if line[i] == '\t' { + if prefix == i { + prefix++ + } else { + slowcase = true + break + } + } + } + + // no need to decode runes if all tabs are at the beginning of the line + if !slowcase { + for i = 0; i < prefix*tabSize; i++ { + out.WriteByte(' ') + } + out.Write(line[prefix:]) + return + } + + // the slow case: we need to count runes to figure out how + // many spaces to insert for each tab + column := 0 + i = 0 + for i < len(line) { + start := i + for i < len(line) && line[i] != '\t' { + _, size := utf8.DecodeRune(line[i:]) + i += size + column++ + } + + if i > start { + out.Write(line[start:i]) + } + + if i >= len(line) { + break + } + + for { + out.WriteByte(' ') + column++ + if column%tabSize == 0 { + break + } + } + + i++ + } +} + +// Find if a line counts as indented or not. +// Returns number of characters the indent is (0 = not indented). +func isIndented(data []byte, indentSize int) int { + if len(data) == 0 { + return 0 + } + if data[0] == '\t' { + return 1 + } + if len(data) < indentSize { + return 0 + } + for i := 0; i < indentSize; i++ { + if data[i] != ' ' { + return 0 + } + } + return indentSize +} + +// Create a url-safe slug for fragments +func slugify(in []byte) []byte { + if len(in) == 0 { + return in + } + out := make([]byte, 0, len(in)) + sym := false + + for _, ch := range in { + if isalnum(ch) { + sym = false + out = append(out, ch) + } else if sym { + continue + } else { + out = append(out, '-') + sym = true + } + } + var a, b int + var ch byte + for a, ch = range out { + if ch != '-' { + break + } + } + for b = len(out) - 1; b > 0; b-- { + if out[b] != '-' { + break + } + } + return out[a : b+1] +} diff --git a/vendor/github.com/russross/blackfriday/v2/node.go b/vendor/github.com/russross/blackfriday/v2/node.go new file mode 100644 index 000000000..04e6050ce --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/node.go @@ -0,0 +1,360 @@ +package blackfriday + +import ( + "bytes" + "fmt" +) + +// NodeType specifies a type of a single node of a syntax tree. Usually one +// node (and its type) corresponds to a single markdown feature, e.g. emphasis +// or code block. +type NodeType int + +// Constants for identifying different types of nodes. See NodeType. +const ( + Document NodeType = iota + BlockQuote + List + Item + Paragraph + Heading + HorizontalRule + Emph + Strong + Del + Link + Image + Text + HTMLBlock + CodeBlock + Softbreak + Hardbreak + Code + HTMLSpan + Table + TableCell + TableHead + TableBody + TableRow +) + +var nodeTypeNames = []string{ + Document: "Document", + BlockQuote: "BlockQuote", + List: "List", + Item: "Item", + Paragraph: "Paragraph", + Heading: "Heading", + HorizontalRule: "HorizontalRule", + Emph: "Emph", + Strong: "Strong", + Del: "Del", + Link: "Link", + Image: "Image", + Text: "Text", + HTMLBlock: "HTMLBlock", + CodeBlock: "CodeBlock", + Softbreak: "Softbreak", + Hardbreak: "Hardbreak", + Code: "Code", + HTMLSpan: "HTMLSpan", + Table: "Table", + TableCell: "TableCell", + TableHead: "TableHead", + TableBody: "TableBody", + TableRow: "TableRow", +} + +func (t NodeType) String() string { + return nodeTypeNames[t] +} + +// ListData contains fields relevant to a List and Item node type. +type ListData struct { + ListFlags ListType + Tight bool // Skip
<p>
    s around list item data if true + BulletChar byte // '*', '+' or '-' in bullet lists + Delimiter byte // '.' or ')' after the number in ordered lists + RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering + IsFootnotesList bool // This is a list of footnotes +} + +// LinkData contains fields relevant to a Link node type. +type LinkData struct { + Destination []byte // Destination is what goes into a href + Title []byte // Title is the tooltip thing that goes in a title attribute + NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote + Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil. +} + +// CodeBlockData contains fields relevant to a CodeBlock node type. +type CodeBlockData struct { + IsFenced bool // Specifies whether it's a fenced code block or an indented one + Info []byte // This holds the info string + FenceChar byte + FenceLength int + FenceOffset int +} + +// TableCellData contains fields relevant to a TableCell node type. +type TableCellData struct { + IsHeader bool // This tells if it's under the header row + Align CellAlignFlags // This holds the value for align attribute +} + +// HeadingData contains fields relevant to a Heading node type. +type HeadingData struct { + Level int // This holds the heading level number + HeadingID string // This might hold heading ID, if present + IsTitleblock bool // Specifies whether it's a title block +} + +// Node is a single element in the abstract syntax tree of the parsed document. +// It holds connections to the structurally neighboring nodes and, for certain +// types of nodes, additional information that might be needed when rendering. +type Node struct { + Type NodeType // Determines the type of the node + Parent *Node // Points to the parent + FirstChild *Node // Points to the first child, if any + LastChild *Node // Points to the last child, if any + Prev *Node // Previous sibling; nil if it's the first child + Next *Node // Next sibling; nil if it's the last child + + Literal []byte // Text contents of the leaf nodes + + HeadingData // Populated if Type is Heading + ListData // Populated if Type is List + CodeBlockData // Populated if Type is CodeBlock + LinkData // Populated if Type is Link + TableCellData // Populated if Type is TableCell + + content []byte // Markdown content of the block nodes + open bool // Specifies an open block node that has not been finished to process yet +} + +// NewNode allocates a node of a specified type. +func NewNode(typ NodeType) *Node { + return &Node{ + Type: typ, + open: true, + } +} + +func (n *Node) String() string { + ellipsis := "" + snippet := n.Literal + if len(snippet) > 16 { + snippet = snippet[:16] + ellipsis = "..." + } + return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis) +} + +// Unlink removes node 'n' from the tree. +// It panics if the node is nil. +func (n *Node) Unlink() { + if n.Prev != nil { + n.Prev.Next = n.Next + } else if n.Parent != nil { + n.Parent.FirstChild = n.Next + } + if n.Next != nil { + n.Next.Prev = n.Prev + } else if n.Parent != nil { + n.Parent.LastChild = n.Prev + } + n.Parent = nil + n.Next = nil + n.Prev = nil +} + +// AppendChild adds a node 'child' as a child of 'n'. +// It panics if either node is nil. 
+func (n *Node) AppendChild(child *Node) { + child.Unlink() + child.Parent = n + if n.LastChild != nil { + n.LastChild.Next = child + child.Prev = n.LastChild + n.LastChild = child + } else { + n.FirstChild = child + n.LastChild = child + } +} + +// InsertBefore inserts 'sibling' immediately before 'n'. +// It panics if either node is nil. +func (n *Node) InsertBefore(sibling *Node) { + sibling.Unlink() + sibling.Prev = n.Prev + if sibling.Prev != nil { + sibling.Prev.Next = sibling + } + sibling.Next = n + n.Prev = sibling + sibling.Parent = n.Parent + if sibling.Prev == nil { + sibling.Parent.FirstChild = sibling + } +} + +// IsContainer returns true if 'n' can contain children. +func (n *Node) IsContainer() bool { + switch n.Type { + case Document: + fallthrough + case BlockQuote: + fallthrough + case List: + fallthrough + case Item: + fallthrough + case Paragraph: + fallthrough + case Heading: + fallthrough + case Emph: + fallthrough + case Strong: + fallthrough + case Del: + fallthrough + case Link: + fallthrough + case Image: + fallthrough + case Table: + fallthrough + case TableHead: + fallthrough + case TableBody: + fallthrough + case TableRow: + fallthrough + case TableCell: + return true + default: + return false + } +} + +// IsLeaf returns true if 'n' is a leaf node. +func (n *Node) IsLeaf() bool { + return !n.IsContainer() +} + +func (n *Node) canContain(t NodeType) bool { + if n.Type == List { + return t == Item + } + if n.Type == Document || n.Type == BlockQuote || n.Type == Item { + return t != Item + } + if n.Type == Table { + return t == TableHead || t == TableBody + } + if n.Type == TableHead || n.Type == TableBody { + return t == TableRow + } + if n.Type == TableRow { + return t == TableCell + } + return false +} + +// WalkStatus allows NodeVisitor to have some control over the tree traversal. +// It is returned from NodeVisitor and different values allow Node.Walk to +// decide which node to go to next. +type WalkStatus int + +const ( + // GoToNext is the default traversal of every node. + GoToNext WalkStatus = iota + // SkipChildren tells walker to skip all children of current node. + SkipChildren + // Terminate tells walker to terminate the traversal. + Terminate +) + +// NodeVisitor is a callback to be called when traversing the syntax tree. +// Called twice for every node: once with entering=true when the branch is +// first visited, then with entering=false after all the children are done. +type NodeVisitor func(node *Node, entering bool) WalkStatus + +// Walk is a convenience method that instantiates a walker and starts a +// traversal of subtree rooted at n. 
+func (n *Node) Walk(visitor NodeVisitor) { + w := newNodeWalker(n) + for w.current != nil { + status := visitor(w.current, w.entering) + switch status { + case GoToNext: + w.next() + case SkipChildren: + w.entering = false + w.next() + case Terminate: + return + } + } +} + +type nodeWalker struct { + current *Node + root *Node + entering bool +} + +func newNodeWalker(root *Node) *nodeWalker { + return &nodeWalker{ + current: root, + root: root, + entering: true, + } +} + +func (nw *nodeWalker) next() { + if (!nw.current.IsContainer() || !nw.entering) && nw.current == nw.root { + nw.current = nil + return + } + if nw.entering && nw.current.IsContainer() { + if nw.current.FirstChild != nil { + nw.current = nw.current.FirstChild + nw.entering = true + } else { + nw.entering = false + } + } else if nw.current.Next == nil { + nw.current = nw.current.Parent + nw.entering = false + } else { + nw.current = nw.current.Next + nw.entering = true + } +} + +func dump(ast *Node) { + fmt.Println(dumpString(ast)) +} + +func dumpR(ast *Node, depth int) string { + if ast == nil { + return "" + } + indent := bytes.Repeat([]byte("\t"), depth) + content := ast.Literal + if content == nil { + content = ast.content + } + result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content) + for n := ast.FirstChild; n != nil; n = n.Next { + result += dumpR(n, depth+1) + } + return result +} + +func dumpString(ast *Node) string { + return dumpR(ast, 0) +} diff --git a/vendor/github.com/russross/blackfriday/v2/smartypants.go b/vendor/github.com/russross/blackfriday/v2/smartypants.go new file mode 100644 index 000000000..3a220e942 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/smartypants.go @@ -0,0 +1,457 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// SmartyPants rendering +// +// + +package blackfriday + +import ( + "bytes" + "io" +) + +// SPRenderer is a struct containing state of a Smartypants renderer. +type SPRenderer struct { + inSingleQuote bool + inDoubleQuote bool + callbacks [256]smartCallback +} + +func wordBoundary(c byte) bool { + return c == 0 || isspace(c) || ispunct(c) +} + +func tolower(c byte) byte { + if c >= 'A' && c <= 'Z' { + return c - 'A' + 'a' + } + return c +} + +func isdigit(c byte) bool { + return c >= '0' && c <= '9' +} + +func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { + // edge of the buffer is likely to be a tag that we don't get to see, + // so we treat it like text sometimes + + // enumerate all sixteen possibilities for (previousChar, nextChar) + // each can be one of {0, space, punct, other} + switch { + case previousChar == 0 && nextChar == 0: + // context is not any help here, so toggle + *isOpen = !*isOpen + case isspace(previousChar) && nextChar == 0: + // [ "] might be [ "foo...] + *isOpen = true + case ispunct(previousChar) && nextChar == 0: + // [!"] hmm... could be [Run!"] or [("...] + *isOpen = false + case /* isnormal(previousChar) && */ nextChar == 0: + // [a"] is probably a close + *isOpen = false + case previousChar == 0 && isspace(nextChar): + // [" ] might be [...foo" ] + *isOpen = false + case isspace(previousChar) && isspace(nextChar): + // [ " ] context is not any help here, so toggle + *isOpen = !*isOpen + case ispunct(previousChar) && isspace(nextChar): + // [!" 
] is probably a close
+		*isOpen = false
+	case /* isnormal(previousChar) && */ isspace(nextChar):
+		// [a" ] this is one of the easy cases
+		*isOpen = false
+	case previousChar == 0 && ispunct(nextChar):
+		// ["!] hmm... could be ["$1.95] or ["!...]
+		*isOpen = false
+	case isspace(previousChar) && ispunct(nextChar):
+		// [ "!] looks more like [ "$1.95]
+		*isOpen = true
+	case ispunct(previousChar) && ispunct(nextChar):
+		// [!"!] context is not any help here, so toggle
+		*isOpen = !*isOpen
+	case /* isnormal(previousChar) && */ ispunct(nextChar):
+		// [a"!] is probably a close
+		*isOpen = false
+	case previousChar == 0 /* && isnormal(nextChar) */ :
+		// ["a] is probably an open
+		*isOpen = true
+	case isspace(previousChar) /* && isnormal(nextChar) */ :
+		// [ "a] this is one of the easy cases
+		*isOpen = true
+	case ispunct(previousChar) /* && isnormal(nextChar) */ :
+		// [!"a] is probably an open
+		*isOpen = true
+	default:
+		// [a'b] maybe a contraction?
+		*isOpen = false
+	}
+
+	// Note that with the limited lookahead, this non-breaking
+	// space will also be appended to single double quotes.
+	if addNBSP && !*isOpen {
+		out.WriteString("&nbsp;")
+	}
+
+	out.WriteByte('&')
+	if *isOpen {
+		out.WriteByte('l')
+	} else {
+		out.WriteByte('r')
+	}
+	out.WriteByte(quote)
+	out.WriteString("quo;")
+
+	if addNBSP && *isOpen {
+		out.WriteString("&nbsp;")
+	}
+
+	return true
+}
+
+func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 2 {
+		t1 := tolower(text[1])
+
+		if t1 == '\'' {
+			nextChar := byte(0)
+			if len(text) >= 3 {
+				nextChar = text[2]
+			}
+			if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
+				return 1
+			}
+		}
+
+		if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) {
+			out.WriteString("&rsquo;")
+			return 0
+		}
+
+		if len(text) >= 3 {
+			t2 := tolower(text[2])
+
+			if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) &&
+				(len(text) < 4 || wordBoundary(text[3])) {
+				out.WriteString("&rsquo;")
+				return 0
+			}
+		}
+	}
+
+	nextChar := byte(0)
+	if len(text) > 1 {
+		nextChar = text[1]
+	}
+	if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) {
+		return 0
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 3 {
+		t1 := tolower(text[1])
+		t2 := tolower(text[2])
+
+		if t1 == 'c' && t2 == ')' {
+			out.WriteString("&copy;")
+			return 2
+		}
+
+		if t1 == 'r' && t2 == ')' {
+			out.WriteString("&reg;")
+			return 2
+		}
+
+		if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' {
+			out.WriteString("&trade;")
+			return 3
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 2 {
+		if text[1] == '-' {
+			out.WriteString("&mdash;")
+			return 1
+		}
+
+		if wordBoundary(previousChar) && wordBoundary(text[1]) {
+			out.WriteString("&ndash;")
+			return 0
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 3 && text[1] == '-' && text[2] == '-' {
+		out.WriteString("&mdash;")
+		return 2
+	}
+	if len(text) >= 2 && text[1] == '-' {
+		out.WriteString("&ndash;")
+		return 1
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int {
+	if bytes.HasPrefix(text, []byte("&quot;")) {
+		nextChar := byte(0)
+		if len(text) >= 7 {
+			nextChar = text[6]
+		}
+		if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) {
+			return 5
+		}
+	}
+
+	if bytes.HasPrefix(text, []byte("&#0;")) {
+		return 3
+	}
+
+	out.WriteByte('&')
+	return 0
+}
+
+func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int {
+	var quote byte = 'd'
+	if angledQuotes {
+		quote = 'a'
+	}
+
+	return func(out *bytes.Buffer, previousChar byte, text []byte) int {
+		return r.smartAmpVariant(out, previousChar, text, quote, addNBSP)
+	}
+}
+
+func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 3 && text[1] == '.' && text[2] == '.' {
+		out.WriteString("&hellip;")
+		return 2
+	}
+
+	if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' {
+		out.WriteString("&hellip;")
+		return 4
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 2 && text[1] == '`' {
+		nextChar := byte(0)
+		if len(text) >= 3 {
+			nextChar = text[2]
+		}
+		if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
+			return 1
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
+		// is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b
+		// note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8)
+		//       and avoid changing dates like 1/23/2005 into fractions.
+		numEnd := 0
+		for len(text) > numEnd && isdigit(text[numEnd]) {
+			numEnd++
+		}
+		if numEnd == 0 {
+			out.WriteByte(text[0])
+			return 0
+		}
+		denStart := numEnd + 1
+		if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 {
+			denStart = numEnd + 3
+		} else if len(text) < numEnd+2 || text[numEnd] != '/' {
+			out.WriteByte(text[0])
+			return 0
+		}
+		denEnd := denStart
+		for len(text) > denEnd && isdigit(text[denEnd]) {
+			denEnd++
+		}
+		if denEnd == denStart {
+			out.WriteByte(text[0])
+			return 0
+		}
+		if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' {
+			out.WriteString("<sup>")
+			out.Write(text[:numEnd])
+			out.WriteString("</sup>&frasl;<sub>")
+			out.Write(text[denStart:denEnd])
+			out.WriteString("</sub>")
+			return denEnd - 1
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
+		if text[0] == '1' && text[1] == '/' && text[2] == '2' {
+			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' {
+				out.WriteString("&frac12;")
+				return 2
+			}
+		}
+
+		if text[0] == '1' && text[1] == '/' && text[2] == '4' {
+			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') {
+				out.WriteString("&frac14;")
+				return 2
+			}
+		}
+
+		if text[0] == '3' && text[1] == '/' && text[2] == '4' {
+			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') {
+				out.WriteString("&frac34;")
+				return 2
+			}
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int {
+	nextChar := byte(0)
+	if len(text) > 1 {
+		nextChar = text[1]
+	}
+	if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) {
+		out.WriteString("&quot;")
+	}
+
+	return 0
+}
+
+func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
+	return r.smartDoubleQuoteVariant(out, previousChar, text, 'd')
+}
+
+func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
+	return r.smartDoubleQuoteVariant(out, previousChar, text, 'a')
+}
+
+func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int {
+	i := 0
+
+	for i < len(text) && text[i] != '>' {
+		i++
+	}
+
+	out.Write(text[:i+1])
+	return i
+}
+
+type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int
+
+// NewSmartypantsRenderer constructs a Smartypants renderer object.
+func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer {
+	var (
+		r SPRenderer
+
+		smartAmpAngled      = r.smartAmp(true, false)
+		smartAmpAngledNBSP  = r.smartAmp(true, true)
+		smartAmpRegular     = r.smartAmp(false, false)
+		smartAmpRegularNBSP = r.smartAmp(false, true)
+
+		addNBSP = flags&SmartypantsQuotesNBSP != 0
+	)
+
+	if flags&SmartypantsAngledQuotes == 0 {
+		r.callbacks['"'] = r.smartDoubleQuote
+		if !addNBSP {
+			r.callbacks['&'] = smartAmpRegular
+		} else {
+			r.callbacks['&'] = smartAmpRegularNBSP
+		}
+	} else {
+		r.callbacks['"'] = r.smartAngledDoubleQuote
+		if !addNBSP {
+			r.callbacks['&'] = smartAmpAngled
+		} else {
+			r.callbacks['&'] = smartAmpAngledNBSP
+		}
+	}
+	r.callbacks['\''] = r.smartSingleQuote
+	r.callbacks['('] = r.smartParens
+	if flags&SmartypantsDashes != 0 {
+		if flags&SmartypantsLatexDashes == 0 {
+			r.callbacks['-'] = r.smartDash
+		} else {
+			r.callbacks['-'] = r.smartDashLatex
+		}
+	}
+	r.callbacks['.'] = r.smartPeriod
+	if flags&SmartypantsFractions == 0 {
+		r.callbacks['1'] = r.smartNumber
+		r.callbacks['3'] = r.smartNumber
+	} else {
+		for ch := '1'; ch <= '9'; ch++ {
+			r.callbacks[ch] = r.smartNumberGeneric
+		}
+	}
+	r.callbacks['<'] = r.smartLeftAngle
+	r.callbacks['`'] = r.smartBacktick
+	return &r
+}
+
+// Process is the entry point of the Smartypants renderer.
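Reviewer note: before the `Process` entry point defined immediately below, here is a minimal, hedged sketch of how this renderer is typically driven. It assumes this file is the vendored `github.com/russross/blackfriday/v2` package (the `HTMLFlags` constants, `NewSmartypantsRenderer`, and `Process` are the exported pieces visible in this diff):

```go
package main

import (
	"os"

	blackfriday "github.com/russross/blackfriday/v2"
)

func main() {
	// Enable dash and fraction substitution in addition to curly quotes.
	r := blackfriday.NewSmartypantsRenderer(blackfriday.SmartypantsDashes | blackfriday.SmartypantsFractions)

	// Process scans the input, dispatching on '"', '\'', '-', '.', digits, etc.,
	// and writes HTML entities such as &ldquo; and &mdash; to the writer.
	r.Process(os.Stdout, []byte(`"smart" -- isn't it? 1/2...`))
}
```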
+func (r *SPRenderer) Process(w io.Writer, text []byte) { + mark := 0 + for i := 0; i < len(text); i++ { + if action := r.callbacks[text[i]]; action != nil { + if i > mark { + w.Write(text[mark:i]) + } + previousChar := byte(0) + if i > 0 { + previousChar = text[i-1] + } + var tmp bytes.Buffer + i += action(&tmp, previousChar, text[i:]) + w.Write(tmp.Bytes()) + mark = i + 1 + } + } + if mark < len(text) { + w.Write(text[mark:]) + } +} diff --git a/vendor/github.com/urfave/cli/v2/.flake8 b/vendor/github.com/urfave/cli/v2/.flake8 new file mode 100644 index 000000000..6deafc261 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/.flake8 @@ -0,0 +1,2 @@ +[flake8] +max-line-length = 120 diff --git a/vendor/github.com/urfave/cli/v2/.gitignore b/vendor/github.com/urfave/cli/v2/.gitignore new file mode 100644 index 000000000..2d5e149b4 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/.gitignore @@ -0,0 +1,7 @@ +*.coverprofile +*.orig +node_modules/ +vendor +.idea +internal/*/built-example +coverage.txt diff --git a/vendor/github.com/urfave/cli/v2/CODE_OF_CONDUCT.md b/vendor/github.com/urfave/cli/v2/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..41ba294f6 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +education, socio-economic status, nationality, personal appearance, race, +religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting Dan Buch at dan@meatballhat.com. All complaints will be +reviewed and investigated and will result in a response that is deemed necessary +and appropriate to the circumstances. The project team is obligated to maintain +confidentiality with regard to the reporter of an incident. Further details of +specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + diff --git a/vendor/github.com/urfave/cli/v2/LICENSE b/vendor/github.com/urfave/cli/v2/LICENSE new file mode 100644 index 000000000..42a597e29 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Jeremy Saenz & Contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/urfave/cli/v2/README.md b/vendor/github.com/urfave/cli/v2/README.md new file mode 100644 index 000000000..408668bc3 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/README.md @@ -0,0 +1,70 @@ +cli +=== + +[![GoDoc](https://godoc.org/github.com/urfave/cli?status.svg)](https://godoc.org/github.com/urfave/cli) +[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-urfave-cli) +[![Go Report Card](https://goreportcard.com/badge/urfave/cli)](https://goreportcard.com/report/urfave/cli) +[![codecov](https://codecov.io/gh/urfave/cli/branch/master/graph/badge.svg)](https://codecov.io/gh/urfave/cli) + +cli is a simple, fast, and fun package for building command line apps in Go. The +goal is to enable developers to write fast and distributable command line +applications in an expressive way. + +## Usage Documentation + +Usage documentation exists for each major version. Don't know what version you're on? You're probably using the version from the `master` branch, which is currently `v2`. 
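Before the version links that follow, a minimal sketch of a v2-style program for orientation; it mirrors the hello-world from the package's own doc comment, and names like `greet` are illustrative only:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2" // imports as package "cli"
)

func main() {
	app := &cli.App{
		Name:  "greet",
		Usage: "say a greeting",
		Action: func(c *cli.Context) error {
			fmt.Println("Greetings")
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```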
+ +- `v2` - [./docs/v2/manual.md](./docs/v2/manual.md) +- `v1` - [./docs/v1/manual.md](./docs/v1/manual.md) + +Guides for migrating to newer versions: + +- `v1-to-v2` - [./docs/migrate-v1-to-v2.md](./docs/migrate-v1-to-v2.md) + +## Installation + +Using this package requires a working Go environment. [See the install instructions for Go](http://golang.org/doc/install.html). + +Go Modules are required when using this package. [See the go blog guide on using Go Modules](https://blog.golang.org/using-go-modules). + +### Using `v2` releases + +``` +$ GO111MODULE=on go get github.com/urfave/cli/v2 +``` + +```go +... +import ( + "github.com/urfave/cli/v2" // imports as package "cli" +) +... +``` + +### Using `v1` releases + +``` +$ GO111MODULE=on go get github.com/urfave/cli +``` + +```go +... +import ( + "github.com/urfave/cli" +) +... +``` + +### GOPATH + +Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can +be easily used: +``` +export PATH=$PATH:$GOPATH/bin +``` + +### Supported platforms + +cli is tested against multiple versions of Go on Linux, and against the latest +released version of Go on OS X and Windows. This project uses Github Actions for +builds. To see our currently supported go versions and platforms, look at the [./.github/workflows/cli.yml](https://github.com/urfave/cli/blob/master/.github/workflows/cli.yml). diff --git a/vendor/github.com/urfave/cli/v2/app.go b/vendor/github.com/urfave/cli/v2/app.go new file mode 100644 index 000000000..f801fce91 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/app.go @@ -0,0 +1,540 @@ +package cli + +import ( + "context" + "flag" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "time" +) + +var ( + changeLogURL = "https://github.com/urfave/cli/blob/master/docs/CHANGELOG.md" + appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL) + contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you." + errInvalidActionType = NewExitError("ERROR invalid Action type. "+ + fmt.Sprintf("Must be `func(*Context`)` or `func(*Context) error). %s", contactSysadmin)+ + fmt.Sprintf("See %s", appActionDeprecationURL), 2) +) + +// App is the main structure of a cli application. It is recommended that +// an app be created with the cli.NewApp() function +type App struct { + // The name of the program. Defaults to path.Base(os.Args[0]) + Name string + // Full name of command for help, defaults to Name + HelpName string + // Description of the program. + Usage string + // Text to override the USAGE section of help + UsageText string + // Description of the program argument format. + ArgsUsage string + // Version of the program + Version string + // Description of the program + Description string + // List of commands to execute + Commands []*Command + // List of flags to parse + Flags []Flag + // Boolean to enable bash completion commands + EnableBashCompletion bool + // Boolean to hide built-in help command and help flag + HideHelp bool + // Boolean to hide built-in help command but keep help flag. + // Ignored if HideHelp is true. 
+ HideHelpCommand bool + // Boolean to hide built-in version flag and the VERSION section of help + HideVersion bool + // categories contains the categorized commands and is populated on app startup + categories CommandCategories + // An action to execute when the shell completion flag is set + BashComplete BashCompleteFunc + // An action to execute before any subcommands are run, but after the context is ready + // If a non-nil error is returned, no subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc + // The action to execute when no subcommands are specified + Action ActionFunc + // Execute this function if the proper command cannot be found + CommandNotFound CommandNotFoundFunc + // Execute this function if a usage error occurs + OnUsageError OnUsageErrorFunc + // Compilation date + Compiled time.Time + // List of all authors who contributed + Authors []*Author + // Copyright of the binary if any + Copyright string + // Reader reader to write input to (useful for tests) + Reader io.Reader + // Writer writer to write output to + Writer io.Writer + // ErrWriter writes error output + ErrWriter io.Writer + // ExitErrHandler processes any error encountered while running an App before + // it is returned to the caller. If no function is provided, HandleExitCoder + // is used as the default behavior. + ExitErrHandler ExitErrHandlerFunc + // Other custom info + Metadata map[string]interface{} + // Carries a function which returns app specific info. + ExtraInfo func() map[string]string + // CustomAppHelpTemplate the text template for app help topic. + // cli.go uses text/template to render templates. You can + // render custom help text by setting this variable. + CustomAppHelpTemplate string + // Boolean to enable short-option handling so user can combine several + // single-character bool arguments into one + // i.e. foobar -o -v -> foobar -ov + UseShortOptionHandling bool + + didSetup bool +} + +// Tries to find out when this binary was compiled. +// Returns the current time if it fails to find it. +func compileTime() time.Time { + info, err := os.Stat(os.Args[0]) + if err != nil { + return time.Now() + } + return info.ModTime() +} + +// NewApp creates a new cli Application with some reasonable defaults for Name, +// Usage, Version and Action. +func NewApp() *App { + return &App{ + Name: filepath.Base(os.Args[0]), + HelpName: filepath.Base(os.Args[0]), + Usage: "A new cli application", + UsageText: "", + BashComplete: DefaultAppComplete, + Action: helpCommand.Action, + Compiled: compileTime(), + Reader: os.Stdin, + Writer: os.Stdout, + ErrWriter: os.Stderr, + } +} + +// Setup runs initialization code to ensure all data structures are ready for +// `Run` or inspection prior to `Run`. It is internally called by `Run`, but +// will return early if setup has already happened. 
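Aside on the `Setup` implementation that follows: because `Run` calls `Setup` and fills in defaults, a test can exercise an `App` without touching the process's real stdio. A small, hedged sketch using the struct fields declared above:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/urfave/cli/v2"
)

func main() {
	var out bytes.Buffer
	app := &cli.App{
		Name:   "demo",
		Writer: &out, // capture help output instead of writing to os.Stdout
	}

	// Run -> Setup: Usage, Compiled, Reader/Writer and friends get defaults,
	// and the built-in help command and flag are appended.
	_ = app.Run([]string{"demo", "--help"})

	fmt.Print(out.String())
}
```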
+func (a *App) Setup() { + if a.didSetup { + return + } + + a.didSetup = true + + if a.Name == "" { + a.Name = filepath.Base(os.Args[0]) + } + + if a.HelpName == "" { + a.HelpName = filepath.Base(os.Args[0]) + } + + if a.Usage == "" { + a.Usage = "A new cli application" + } + + if a.Version == "" { + a.HideVersion = true + } + + if a.BashComplete == nil { + a.BashComplete = DefaultAppComplete + } + + if a.Action == nil { + a.Action = helpCommand.Action + } + + if a.Compiled == (time.Time{}) { + a.Compiled = compileTime() + } + + if a.Reader == nil { + a.Reader = os.Stdin + } + + if a.Writer == nil { + a.Writer = os.Stdout + } + + if a.ErrWriter == nil { + a.ErrWriter = os.Stderr + } + + var newCommands []*Command + + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) + } + newCommands = append(newCommands, c) + } + a.Commands = newCommands + + if a.Command(helpCommand.Name) == nil && !a.HideHelp { + if !a.HideHelpCommand { + a.appendCommand(helpCommand) + } + + if HelpFlag != nil { + a.appendFlag(HelpFlag) + } + } + + if !a.HideVersion { + a.appendFlag(VersionFlag) + } + + a.categories = newCommandCategories() + for _, command := range a.Commands { + a.categories.AddCommand(command.Category, command) + } + sort.Sort(a.categories.(*commandCategories)) + + if a.Metadata == nil { + a.Metadata = make(map[string]interface{}) + } +} + +func (a *App) newFlagSet() (*flag.FlagSet, error) { + return flagSet(a.Name, a.Flags) +} + +func (a *App) useShortOptionHandling() bool { + return a.UseShortOptionHandling +} + +// Run is the entry point to the cli app. Parses the arguments slice and routes +// to the proper flag/args combination +func (a *App) Run(arguments []string) (err error) { + return a.RunContext(context.Background(), arguments) +} + +// RunContext is like Run except it takes a Context that will be +// passed to its commands and sub-commands. Through this, you can +// propagate timeouts and cancellation requests +func (a *App) RunContext(ctx context.Context, arguments []string) (err error) { + a.Setup() + + // handle the completion flag separately from the flagset since + // completion could be attempted after a flag, but before its value was put + // on the command line. 
this causes the flagset to interpret the completion + // flag name as the value of the flag before it which is undesirable + // note that we can only do this because the shell autocomplete function + // always appends the completion flag at the end of the command + shellComplete, arguments := checkShellCompleteFlag(a, arguments) + + set, err := a.newFlagSet() + if err != nil { + return err + } + + err = parseIter(set, a, arguments[1:], shellComplete) + nerr := normalizeFlags(a.Flags, set) + context := NewContext(a, set, &Context{Context: ctx}) + if nerr != nil { + _, _ = fmt.Fprintln(a.Writer, nerr) + _ = ShowAppHelp(context) + return nerr + } + context.shellComplete = shellComplete + + if checkCompletions(context) { + return nil + } + + if err != nil { + if a.OnUsageError != nil { + err := a.OnUsageError(context, err, false) + a.handleExitCoder(context, err) + return err + } + _, _ = fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) + _ = ShowAppHelp(context) + return err + } + + if !a.HideHelp && checkHelp(context) { + _ = ShowAppHelp(context) + return nil + } + + if !a.HideVersion && checkVersion(context) { + ShowVersion(context) + return nil + } + + cerr := checkRequiredFlags(a.Flags, context) + if cerr != nil { + _ = ShowAppHelp(context) + return cerr + } + + if a.After != nil { + defer func() { + if afterErr := a.After(context); afterErr != nil { + if err != nil { + err = newMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if a.Before != nil { + beforeErr := a.Before(context) + if beforeErr != nil { + a.handleExitCoder(context, beforeErr) + err = beforeErr + return err + } + } + + args := context.Args() + if args.Present() { + name := args.First() + c := a.Command(name) + if c != nil { + return c.Run(context) + } + } + + if a.Action == nil { + a.Action = helpCommand.Action + } + + // Run default Action + err = a.Action(context) + + a.handleExitCoder(context, err) + return err +} + +// RunAndExitOnError calls .Run() and exits non-zero if an error was returned +// +// Deprecated: instead you should return an error that fulfills cli.ExitCoder +// to cli.App.Run. 
This will cause the application to exit with the given eror +// code in the cli.ExitCoder +func (a *App) RunAndExitOnError() { + if err := a.Run(os.Args); err != nil { + _, _ = fmt.Fprintln(a.ErrWriter, err) + OsExiter(1) + } +} + +// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to +// generate command-specific flags +func (a *App) RunAsSubcommand(ctx *Context) (err error) { + // Setup also handles HideHelp and HideHelpCommand + a.Setup() + + var newCmds []*Command + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) + } + newCmds = append(newCmds, c) + } + a.Commands = newCmds + + set, err := a.newFlagSet() + if err != nil { + return err + } + + err = parseIter(set, a, ctx.Args().Tail(), ctx.shellComplete) + nerr := normalizeFlags(a.Flags, set) + context := NewContext(a, set, ctx) + + if nerr != nil { + _, _ = fmt.Fprintln(a.Writer, nerr) + _, _ = fmt.Fprintln(a.Writer) + if len(a.Commands) > 0 { + _ = ShowSubcommandHelp(context) + } else { + _ = ShowCommandHelp(ctx, context.Args().First()) + } + return nerr + } + + if checkCompletions(context) { + return nil + } + + if err != nil { + if a.OnUsageError != nil { + err = a.OnUsageError(context, err, true) + a.handleExitCoder(context, err) + return err + } + _, _ = fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) + _ = ShowSubcommandHelp(context) + return err + } + + if len(a.Commands) > 0 { + if checkSubcommandHelp(context) { + return nil + } + } else { + if checkCommandHelp(ctx, context.Args().First()) { + return nil + } + } + + cerr := checkRequiredFlags(a.Flags, context) + if cerr != nil { + _ = ShowSubcommandHelp(context) + return cerr + } + + if a.After != nil { + defer func() { + afterErr := a.After(context) + if afterErr != nil { + a.handleExitCoder(context, err) + if err != nil { + err = newMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if a.Before != nil { + beforeErr := a.Before(context) + if beforeErr != nil { + a.handleExitCoder(context, beforeErr) + err = beforeErr + return err + } + } + + args := context.Args() + if args.Present() { + name := args.First() + c := a.Command(name) + if c != nil { + return c.Run(context) + } + } + + // Run default Action + err = a.Action(context) + + a.handleExitCoder(context, err) + return err +} + +// Command returns the named command on App. 
Returns nil if the command does not exist +func (a *App) Command(name string) *Command { + for _, c := range a.Commands { + if c.HasName(name) { + return c + } + } + + return nil +} + +// VisibleCategories returns a slice of categories and commands that are +// Hidden=false +func (a *App) VisibleCategories() []CommandCategory { + ret := []CommandCategory{} + for _, category := range a.categories.Categories() { + if visible := func() CommandCategory { + if len(category.VisibleCommands()) > 0 { + return category + } + return nil + }(); visible != nil { + ret = append(ret, visible) + } + } + return ret +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (a *App) VisibleCommands() []*Command { + var ret []*Command + for _, command := range a.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (a *App) VisibleFlags() []Flag { + return visibleFlags(a.Flags) +} + +func (a *App) appendFlag(fl Flag) { + if !hasFlag(a.Flags, fl) { + a.Flags = append(a.Flags, fl) + } +} + +func (a *App) appendCommand(c *Command) { + if !hasCommand(a.Commands, c) { + a.Commands = append(a.Commands, c) + } +} + +func (a *App) handleExitCoder(context *Context, err error) { + if a.ExitErrHandler != nil { + a.ExitErrHandler(context, err) + } else { + HandleExitCoder(err) + } +} + +// Author represents someone who has contributed to a cli project. +type Author struct { + Name string // The Authors name + Email string // The Authors email +} + +// String makes Author comply to the Stringer interface, to allow an easy print in the templating process +func (a *Author) String() string { + e := "" + if a.Email != "" { + e = " <" + a.Email + ">" + } + + return fmt.Sprintf("%v%v", a.Name, e) +} + +// HandleAction attempts to figure out which Action signature was used. If +// it's an ActionFunc or a func with the legacy signature for Action, the func +// is run! 
+func HandleAction(action interface{}, context *Context) (err error) { + switch a := action.(type) { + case ActionFunc: + return a(context) + case func(*Context) error: + return a(context) + case func(*Context): // deprecated function signature + a(context) + return nil + } + + return errInvalidActionType +} diff --git a/vendor/github.com/urfave/cli/v2/args.go b/vendor/github.com/urfave/cli/v2/args.go new file mode 100644 index 000000000..bd65c17bd --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/args.go @@ -0,0 +1,54 @@ +package cli + +type Args interface { + // Get returns the nth argument, or else a blank string + Get(n int) string + // First returns the first argument, or else a blank string + First() string + // Tail returns the rest of the arguments (not the first one) + // or else an empty string slice + Tail() []string + // Len returns the length of the wrapped slice + Len() int + // Present checks if there are any arguments present + Present() bool + // Slice returns a copy of the internal slice + Slice() []string +} + +type args []string + +func (a *args) Get(n int) string { + if len(*a) > n { + return (*a)[n] + } + return "" +} + +func (a *args) First() string { + return a.Get(0) +} + +func (a *args) Tail() []string { + if a.Len() >= 2 { + tail := []string((*a)[1:]) + ret := make([]string, len(tail)) + copy(ret, tail) + return ret + } + return []string{} +} + +func (a *args) Len() int { + return len(*a) +} + +func (a *args) Present() bool { + return a.Len() != 0 +} + +func (a *args) Slice() []string { + ret := make([]string, len(*a)) + copy(ret, *a) + return ret +} diff --git a/vendor/github.com/urfave/cli/v2/category.go b/vendor/github.com/urfave/cli/v2/category.go new file mode 100644 index 000000000..867e3908c --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/category.go @@ -0,0 +1,79 @@ +package cli + +// CommandCategories interface allows for category manipulation +type CommandCategories interface { + // AddCommand adds a command to a category, creating a new category if necessary. + AddCommand(category string, command *Command) + // categories returns a copy of the category slice + Categories() []CommandCategory +} + +type commandCategories []*commandCategory + +func newCommandCategories() CommandCategories { + ret := commandCategories([]*commandCategory{}) + return &ret +} + +func (c *commandCategories) Less(i, j int) bool { + return lexicographicLess((*c)[i].Name(), (*c)[j].Name()) +} + +func (c *commandCategories) Len() int { + return len(*c) +} + +func (c *commandCategories) Swap(i, j int) { + (*c)[i], (*c)[j] = (*c)[j], (*c)[i] +} + +func (c *commandCategories) AddCommand(category string, command *Command) { + for _, commandCategory := range []*commandCategory(*c) { + if commandCategory.name == category { + commandCategory.commands = append(commandCategory.commands, command) + return + } + } + newVal := append(*c, + &commandCategory{name: category, commands: []*Command{command}}) + *c = newVal +} + +func (c *commandCategories) Categories() []CommandCategory { + ret := make([]CommandCategory, len(*c)) + for i, cat := range *c { + ret[i] = cat + } + return ret +} + +// CommandCategory is a category containing commands. 
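Aside on the category plumbing defined below: commands opt in simply by setting `Category`, and the help template groups them via `VisibleCategories`. A hedged sketch with illustrative command names:

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "tool",
		Commands: []*cli.Command{
			{Name: "add", Category: "math", Action: func(c *cli.Context) error { fmt.Println("add"); return nil }},
			{Name: "sub", Category: "math", Action: func(c *cli.Context) error { fmt.Println("sub"); return nil }},
			{Name: "serve", Category: "net", Action: func(c *cli.Context) error { fmt.Println("serve"); return nil }},
		},
	}

	// `tool help` walks VisibleCategories(), so the math and net
	// commands are listed under separate headings.
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```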
+type CommandCategory interface { + // Name returns the category name string + Name() string + // VisibleCommands returns a slice of the Commands with Hidden=false + VisibleCommands() []*Command +} + +type commandCategory struct { + name string + commands []*Command +} + +func (c *commandCategory) Name() string { + return c.name +} + +func (c *commandCategory) VisibleCommands() []*Command { + if c.commands == nil { + c.commands = []*Command{} + } + + var ret []*Command + for _, command := range c.commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} diff --git a/vendor/github.com/urfave/cli/v2/cli.go b/vendor/github.com/urfave/cli/v2/cli.go new file mode 100644 index 000000000..62a5bc22d --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/cli.go @@ -0,0 +1,23 @@ +// Package cli provides a minimal framework for creating and organizing command line +// Go applications. cli is designed to be easy to understand and write, the most simple +// cli application can be written as follows: +// func main() { +// (&cli.App{}).Run(os.Args) +// } +// +// Of course this application does not do much, so let's make this an actual application: +// func main() { +// app := &cli.App{ +// Name: "greet", +// Usage: "say a greeting", +// Action: func(c *cli.Context) error { +// fmt.Println("Greetings") +// return nil +// }, +// } +// +// app.Run(os.Args) +// } +package cli + +//go:generate go run flag-gen/main.go flag-gen/assets_vfsdata.go diff --git a/vendor/github.com/urfave/cli/v2/command.go b/vendor/github.com/urfave/cli/v2/command.go new file mode 100644 index 000000000..dda2f49a0 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/command.go @@ -0,0 +1,300 @@ +package cli + +import ( + "flag" + "fmt" + "sort" + "strings" +) + +// Command is a subcommand for a cli.App. +type Command struct { + // The name of the command + Name string + // A list of aliases for the command + Aliases []string + // A short description of the usage of this command + Usage string + // Custom text to show on USAGE section of help + UsageText string + // A longer explanation of how the command works + Description string + // A short description of the arguments of this command + ArgsUsage string + // The category the command is part of + Category string + // The function to call when checking for bash command completions + BashComplete BashCompleteFunc + // An action to execute before any sub-subcommands are run, but after the context is ready + // If a non-nil error is returned, no sub-subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc + // The function to call when this command is invoked + Action ActionFunc + // Execute this function if a usage error occurs. + OnUsageError OnUsageErrorFunc + // List of child commands + Subcommands []*Command + // List of flags to parse + Flags []Flag + // Treat all flags as normal arguments if true + SkipFlagParsing bool + // Boolean to hide built-in help command and help flag + HideHelp bool + // Boolean to hide built-in help command but keep help flag + // Ignored if HideHelp is true. + HideHelpCommand bool + // Boolean to hide this command from help or completion + Hidden bool + // Boolean to enable short-option handling so user can combine several + // single-character bool arguments into one + // i.e. 
foobar -o -v -> foobar -ov + UseShortOptionHandling bool + + // Full name of command for help, defaults to full command name, including parent commands. + HelpName string + commandNamePath []string + + // CustomHelpTemplate the text template for the command help topic. + // cli.go uses text/template to render templates. You can + // render custom help text by setting this variable. + CustomHelpTemplate string +} + +type Commands []*Command + +type CommandsByName []*Command + +func (c CommandsByName) Len() int { + return len(c) +} + +func (c CommandsByName) Less(i, j int) bool { + return lexicographicLess(c[i].Name, c[j].Name) +} + +func (c CommandsByName) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +// FullName returns the full name of the command. +// For subcommands this ensures that parent commands are part of the command path +func (c *Command) FullName() string { + if c.commandNamePath == nil { + return c.Name + } + return strings.Join(c.commandNamePath, " ") +} + +// Run invokes the command given the context, parses ctx.Args() to generate command-specific flags +func (c *Command) Run(ctx *Context) (err error) { + if len(c.Subcommands) > 0 { + return c.startApp(ctx) + } + + if !c.HideHelp && HelpFlag != nil { + // append help to flags + c.appendFlag(HelpFlag) + } + + if ctx.App.UseShortOptionHandling { + c.UseShortOptionHandling = true + } + + set, err := c.parseFlags(ctx.Args(), ctx.shellComplete) + + context := NewContext(ctx.App, set, ctx) + context.Command = c + if checkCommandCompletions(context, c.Name) { + return nil + } + + if err != nil { + if c.OnUsageError != nil { + err = c.OnUsageError(context, err, false) + context.App.handleExitCoder(context, err) + return err + } + _, _ = fmt.Fprintln(context.App.Writer, "Incorrect Usage:", err.Error()) + _, _ = fmt.Fprintln(context.App.Writer) + _ = ShowCommandHelp(context, c.Name) + return err + } + + if checkCommandHelp(context, c.Name) { + return nil + } + + cerr := checkRequiredFlags(c.Flags, context) + if cerr != nil { + _ = ShowCommandHelp(context, c.Name) + return cerr + } + + if c.After != nil { + defer func() { + afterErr := c.After(context) + if afterErr != nil { + context.App.handleExitCoder(context, err) + if err != nil { + err = newMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if c.Before != nil { + err = c.Before(context) + if err != nil { + context.App.handleExitCoder(context, err) + return err + } + } + + if c.Action == nil { + c.Action = helpSubcommand.Action + } + + context.Command = c + err = c.Action(context) + + if err != nil { + context.App.handleExitCoder(context, err) + } + return err +} + +func (c *Command) newFlagSet() (*flag.FlagSet, error) { + return flagSet(c.Name, c.Flags) +} + +func (c *Command) useShortOptionHandling() bool { + return c.UseShortOptionHandling +} + +func (c *Command) parseFlags(args Args, shellComplete bool) (*flag.FlagSet, error) { + set, err := c.newFlagSet() + if err != nil { + return nil, err + } + + if c.SkipFlagParsing { + return set, set.Parse(append([]string{"--"}, args.Tail()...)) + } + + err = parseIter(set, c, args.Tail(), shellComplete) + if err != nil { + return nil, err + } + + err = normalizeFlags(c.Flags, set) + if err != nil { + return nil, err + } + + return set, nil +} + +// Names returns the names including short names and aliases. +func (c *Command) Names() []string { + return append([]string{c.Name}, c.Aliases...) 
+} + +// HasName returns true if Command.Name matches given name +func (c *Command) HasName(name string) bool { + for _, n := range c.Names() { + if n == name { + return true + } + } + return false +} + +func (c *Command) startApp(ctx *Context) error { + app := &App{ + Metadata: ctx.App.Metadata, + Name: fmt.Sprintf("%s %s", ctx.App.Name, c.Name), + } + + if c.HelpName == "" { + app.HelpName = c.HelpName + } else { + app.HelpName = app.Name + } + + app.Usage = c.Usage + app.Description = c.Description + app.ArgsUsage = c.ArgsUsage + + // set CommandNotFound + app.CommandNotFound = ctx.App.CommandNotFound + app.CustomAppHelpTemplate = c.CustomHelpTemplate + + // set the flags and commands + app.Commands = c.Subcommands + app.Flags = c.Flags + app.HideHelp = c.HideHelp + app.HideHelpCommand = c.HideHelpCommand + + app.Version = ctx.App.Version + app.HideVersion = true + app.Compiled = ctx.App.Compiled + app.Writer = ctx.App.Writer + app.ErrWriter = ctx.App.ErrWriter + app.ExitErrHandler = ctx.App.ExitErrHandler + app.UseShortOptionHandling = ctx.App.UseShortOptionHandling + + app.categories = newCommandCategories() + for _, command := range c.Subcommands { + app.categories.AddCommand(command.Category, command) + } + + sort.Sort(app.categories.(*commandCategories)) + + // bash completion + app.EnableBashCompletion = ctx.App.EnableBashCompletion + if c.BashComplete != nil { + app.BashComplete = c.BashComplete + } + + // set the actions + app.Before = c.Before + app.After = c.After + if c.Action != nil { + app.Action = c.Action + } else { + app.Action = helpSubcommand.Action + } + app.OnUsageError = c.OnUsageError + + for index, cc := range app.Commands { + app.Commands[index].commandNamePath = []string{c.Name, cc.Name} + } + + return app.RunAsSubcommand(ctx) +} + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (c *Command) VisibleFlags() []Flag { + return visibleFlags(c.Flags) +} + +func (c *Command) appendFlag(fl Flag) { + if !hasFlag(c.Flags, fl) { + c.Flags = append(c.Flags, fl) + } +} + +func hasCommand(commands []*Command, command *Command) bool { + for _, existing := range commands { + if command == existing { + return true + } + } + + return false +} diff --git a/vendor/github.com/urfave/cli/v2/context.go b/vendor/github.com/urfave/cli/v2/context.go new file mode 100644 index 000000000..74ed51912 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/context.go @@ -0,0 +1,273 @@ +package cli + +import ( + "context" + "errors" + "flag" + "fmt" + "strings" +) + +// Context is a type that is passed through to +// each Handler action in a cli application. Context +// can be used to retrieve context-specific args and +// parsed command-line options. +type Context struct { + context.Context + App *App + Command *Command + shellComplete bool + flagSet *flag.FlagSet + parentContext *Context +} + +// NewContext creates a new context. For use in when invoking an App or Command action. +func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context { + c := &Context{App: app, flagSet: set, parentContext: parentCtx} + if parentCtx != nil { + c.Context = parentCtx.Context + c.shellComplete = parentCtx.shellComplete + if parentCtx.flagSet == nil { + parentCtx.flagSet = &flag.FlagSet{} + } + } + + c.Command = &Command{} + + if c.Context == nil { + c.Context = context.Background() + } + + return c +} + +// NumFlags returns the number of flags set +func (c *Context) NumFlags() int { + return c.flagSet.NFlag() +} + +// Set sets a context flag to a value. 
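Before the `Context` accessors below, a hedged sketch of how an action typically consumes them: flag lookups walk the context lineage, while `Args` returns the leftover positionals. The flag and command names here are illustrative:

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name:  "hello",
		Flags: []cli.Flag{&cli.StringFlag{Name: "name", Value: "world"}},
		Action: func(c *cli.Context) error {
			// String resolves the flag through this context and its parents.
			fmt.Printf("hello %s\n", c.String("name"))
			if c.Args().Present() {
				fmt.Println("extra args:", c.Args().Slice())
			}
			return nil
		},
	}
	_ = app.Run(os.Args)
}
```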
+func (c *Context) Set(name, value string) error { + return c.flagSet.Set(name, value) +} + +// IsSet determines if the flag was actually set +func (c *Context) IsSet(name string) bool { + if fs := lookupFlagSet(name, c); fs != nil { + if fs := lookupFlagSet(name, c); fs != nil { + isSet := false + fs.Visit(func(f *flag.Flag) { + if f.Name == name { + isSet = true + } + }) + if isSet { + return true + } + } + + f := lookupFlag(name, c) + if f == nil { + return false + } + + return f.IsSet() + } + + return false +} + +// LocalFlagNames returns a slice of flag names used in this context. +func (c *Context) LocalFlagNames() []string { + var names []string + c.flagSet.Visit(makeFlagNameVisitor(&names)) + return names +} + +// FlagNames returns a slice of flag names used by the this context and all of +// its parent contexts. +func (c *Context) FlagNames() []string { + var names []string + for _, ctx := range c.Lineage() { + ctx.flagSet.Visit(makeFlagNameVisitor(&names)) + } + return names +} + +// Lineage returns *this* context and all of its ancestor contexts in order from +// child to parent +func (c *Context) Lineage() []*Context { + var lineage []*Context + + for cur := c; cur != nil; cur = cur.parentContext { + lineage = append(lineage, cur) + } + + return lineage +} + +// Value returns the value of the flag corresponding to `name` +func (c *Context) Value(name string) interface{} { + return c.flagSet.Lookup(name).Value.(flag.Getter).Get() +} + +// Args returns the command line arguments associated with the context. +func (c *Context) Args() Args { + ret := args(c.flagSet.Args()) + return &ret +} + +// NArg returns the number of the command line arguments. +func (c *Context) NArg() int { + return c.Args().Len() +} + +func lookupFlag(name string, ctx *Context) Flag { + for _, c := range ctx.Lineage() { + if c.Command == nil { + continue + } + + for _, f := range c.Command.Flags { + for _, n := range f.Names() { + if n == name { + return f + } + } + } + } + + if ctx.App != nil { + for _, f := range ctx.App.Flags { + for _, n := range f.Names() { + if n == name { + return f + } + } + } + } + + return nil +} + +func lookupFlagSet(name string, ctx *Context) *flag.FlagSet { + for _, c := range ctx.Lineage() { + if f := c.flagSet.Lookup(name); f != nil { + return c.flagSet + } + } + + return nil +} + +func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) { + switch ff.Value.(type) { + case Serializer: + _ = set.Set(name, ff.Value.(Serializer).Serialize()) + default: + _ = set.Set(name, ff.Value.String()) + } +} + +func normalizeFlags(flags []Flag, set *flag.FlagSet) error { + visited := make(map[string]bool) + set.Visit(func(f *flag.Flag) { + visited[f.Name] = true + }) + for _, f := range flags { + parts := f.Names() + if len(parts) == 1 { + continue + } + var ff *flag.Flag + for _, name := range parts { + name = strings.Trim(name, " ") + if visited[name] { + if ff != nil { + return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name) + } + ff = set.Lookup(name) + } + } + if ff == nil { + continue + } + for _, name := range parts { + name = strings.Trim(name, " ") + if !visited[name] { + copyFlag(name, ff, set) + } + } + } + return nil +} + +func makeFlagNameVisitor(names *[]string) func(*flag.Flag) { + return func(f *flag.Flag) { + nameParts := strings.Split(f.Name, ",") + name := strings.TrimSpace(nameParts[0]) + + for _, part := range nameParts { + part = strings.TrimSpace(part) + if len(part) > len(name) { + name = part + } + } + + if name != "" { + *names = 
append(*names, name) + } + } +} + +type requiredFlagsErr interface { + error + getMissingFlags() []string +} + +type errRequiredFlags struct { + missingFlags []string +} + +func (e *errRequiredFlags) Error() string { + numberOfMissingFlags := len(e.missingFlags) + if numberOfMissingFlags == 1 { + return fmt.Sprintf("Required flag %q not set", e.missingFlags[0]) + } + joinedMissingFlags := strings.Join(e.missingFlags, ", ") + return fmt.Sprintf("Required flags %q not set", joinedMissingFlags) +} + +func (e *errRequiredFlags) getMissingFlags() []string { + return e.missingFlags +} + +func checkRequiredFlags(flags []Flag, context *Context) requiredFlagsErr { + var missingFlags []string + for _, f := range flags { + if rf, ok := f.(RequiredFlag); ok && rf.IsRequired() { + var flagPresent bool + var flagName string + + for _, key := range f.Names() { + if len(key) > 1 { + flagName = key + } + + if context.IsSet(strings.TrimSpace(key)) { + flagPresent = true + } + } + + if !flagPresent && flagName != "" { + missingFlags = append(missingFlags, flagName) + } + } + } + + if len(missingFlags) != 0 { + return &errRequiredFlags{missingFlags: missingFlags} + } + + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/docs.go b/vendor/github.com/urfave/cli/v2/docs.go new file mode 100644 index 000000000..dc16fc82d --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/docs.go @@ -0,0 +1,148 @@ +package cli + +import ( + "bytes" + "fmt" + "io" + "sort" + "strings" + "text/template" + + "github.com/cpuguy83/go-md2man/v2/md2man" +) + +// ToMarkdown creates a markdown string for the `*App` +// The function errors if either parsing or writing of the string fails. +func (a *App) ToMarkdown() (string, error) { + var w bytes.Buffer + if err := a.writeDocTemplate(&w); err != nil { + return "", err + } + return w.String(), nil +} + +// ToMan creates a man page string for the `*App` +// The function errors if either parsing or writing of the string fails. 
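Aside: `ToMarkdown` above and `ToMan` below are the two halves of the doc generator; both render the same template, and `ToMan` merely adds the md2man pass. A hedged usage sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{Name: "greet", Usage: "say a greeting"}

	md, err := app.ToMarkdown()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(md) // markdown suitable for docs sites

	man, err := app.ToMan()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(man) // roff output suitable for man pages
}
```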
+func (a *App) ToMan() (string, error) { + var w bytes.Buffer + if err := a.writeDocTemplate(&w); err != nil { + return "", err + } + man := md2man.Render(w.Bytes()) + return string(man), nil +} + +type cliTemplate struct { + App *App + Commands []string + GlobalArgs []string + SynopsisArgs []string +} + +func (a *App) writeDocTemplate(w io.Writer) error { + const name = "cli" + t, err := template.New(name).Parse(MarkdownDocTemplate) + if err != nil { + return err + } + return t.ExecuteTemplate(w, name, &cliTemplate{ + App: a, + Commands: prepareCommands(a.Commands, 0), + GlobalArgs: prepareArgsWithValues(a.VisibleFlags()), + SynopsisArgs: prepareArgsSynopsis(a.VisibleFlags()), + }) +} + +func prepareCommands(commands []*Command, level int) []string { + var coms []string + for _, command := range commands { + if command.Hidden { + continue + } + usage := "" + if command.Usage != "" { + usage = command.Usage + } + + prepared := fmt.Sprintf("%s %s\n\n%s\n", + strings.Repeat("#", level+2), + strings.Join(command.Names(), ", "), + usage, + ) + + flags := prepareArgsWithValues(command.Flags) + if len(flags) > 0 { + prepared += fmt.Sprintf("\n%s", strings.Join(flags, "\n")) + } + + coms = append(coms, prepared) + + // recursevly iterate subcommands + if len(command.Subcommands) > 0 { + coms = append( + coms, + prepareCommands(command.Subcommands, level+1)..., + ) + } + } + + return coms +} + +func prepareArgsWithValues(flags []Flag) []string { + return prepareFlags(flags, ", ", "**", "**", `""`, true) +} + +func prepareArgsSynopsis(flags []Flag) []string { + return prepareFlags(flags, "|", "[", "]", "[value]", false) +} + +func prepareFlags( + flags []Flag, + sep, opener, closer, value string, + addDetails bool, +) []string { + args := []string{} + for _, f := range flags { + flag, ok := f.(DocGenerationFlag) + if !ok { + continue + } + modifiedArg := opener + + for _, s := range flag.Names() { + trimmed := strings.TrimSpace(s) + if len(modifiedArg) > len(opener) { + modifiedArg += sep + } + if len(trimmed) > 1 { + modifiedArg += fmt.Sprintf("--%s", trimmed) + } else { + modifiedArg += fmt.Sprintf("-%s", trimmed) + } + } + modifiedArg += closer + if flag.TakesValue() { + modifiedArg += fmt.Sprintf("=%s", value) + } + + if addDetails { + modifiedArg += flagDetails(flag) + } + + args = append(args, modifiedArg+"\n") + + } + sort.Strings(args) + return args +} + +// flagDetails returns a string containing the flags metadata +func flagDetails(flag DocGenerationFlag) string { + description := flag.GetUsage() + value := flag.GetValue() + if value != "" { + description += " (default: " + value + ")" + } + return ": " + description +} diff --git a/vendor/github.com/urfave/cli/v2/errors.go b/vendor/github.com/urfave/cli/v2/errors.go new file mode 100644 index 000000000..751ef9b16 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/errors.go @@ -0,0 +1,141 @@ +package cli + +import ( + "fmt" + "io" + "os" + "strings" +) + +// OsExiter is the function used when the app exits. If not set defaults to os.Exit. +var OsExiter = os.Exit + +// ErrWriter is used to write errors to the user. This can be anything +// implementing the io.Writer interface and defaults to os.Stderr. +var ErrWriter io.Writer = os.Stderr + +// MultiError is an error that wraps multiple errors. +type MultiError interface { + error + Errors() []error +} + +// newMultiError creates a new MultiError. Pass in one or more errors. 
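Note on `MultiError`: it surfaces when both the action and an `After` hook fail, via the deferred wrapping in `Run` earlier in this diff. A hedged sketch:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name:   "demo",
		Action: func(c *cli.Context) error { return errors.New("action failed") },
		After:  func(c *cli.Context) error { return errors.New("after failed") },
	}

	err := app.Run([]string{"demo"})
	if _, ok := err.(cli.MultiError); ok {
		// Error() joins the wrapped errors with newlines.
		fmt.Println(err)
	}
}
```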
+func newMultiError(err ...error) MultiError { + ret := multiError(err) + return &ret +} + +type multiError []error + +// Error implements the error interface. +func (m *multiError) Error() string { + errs := make([]string, len(*m)) + for i, err := range *m { + errs[i] = err.Error() + } + + return strings.Join(errs, "\n") +} + +// Errors returns a copy of the errors slice +func (m *multiError) Errors() []error { + errs := make([]error, len(*m)) + for _, err := range *m { + errs = append(errs, err) + } + return errs +} + +// ErrorFormatter is the interface that will suitably format the error output +type ErrorFormatter interface { + Format(s fmt.State, verb rune) +} + +// ExitCoder is the interface checked by `App` and `Command` for a custom exit +// code +type ExitCoder interface { + error + ExitCode() int +} + +type exitError struct { + exitCode int + message interface{} +} + +// NewExitError calls Exit to create a new ExitCoder. +// +// Deprecated: This function is a duplicate of Exit and will eventually be removed. +func NewExitError(message interface{}, exitCode int) ExitCoder { + return Exit(message, exitCode) +} + +// Exit wraps a message and exit code into an error, which by default is +// handled with a call to os.Exit during default error handling. +// +// This is the simplest way to trigger a non-zero exit code for an App without +// having to call os.Exit manually. During testing, this behavior can be avoided +// by overiding the ExitErrHandler function on an App or the package-global +// OsExiter function. +func Exit(message interface{}, exitCode int) ExitCoder { + return &exitError{ + message: message, + exitCode: exitCode, + } +} + +func (ee *exitError) Error() string { + return fmt.Sprintf("%v", ee.message) +} + +func (ee *exitError) ExitCode() int { + return ee.exitCode +} + +// HandleExitCoder handles errors implementing ExitCoder by printing their +// message and calling OsExiter with the given exit code. +// +// If the given error instead implements MultiError, each error will be checked +// for the ExitCoder interface, and OsExiter will be called with the last exit +// code found, or exit code 1 if no ExitCoder is found. +// +// This function is the default error-handling behavior for an App. +func HandleExitCoder(err error) { + if err == nil { + return + } + + if exitErr, ok := err.(ExitCoder); ok { + if err.Error() != "" { + if _, ok := exitErr.(ErrorFormatter); ok { + _, _ = fmt.Fprintf(ErrWriter, "%+v\n", err) + } else { + _, _ = fmt.Fprintln(ErrWriter, err) + } + } + OsExiter(exitErr.ExitCode()) + return + } + + if multiErr, ok := err.(MultiError); ok { + code := handleMultiError(multiErr) + OsExiter(code) + return + } +} + +func handleMultiError(multiErr MultiError) int { + code := 1 + for _, merr := range multiErr.Errors() { + if multiErr2, ok := merr.(MultiError); ok { + code = handleMultiError(multiErr2) + } else if merr != nil { + fmt.Fprintln(ErrWriter, merr) + if exitErr, ok := merr.(ExitCoder); ok { + code = exitErr.ExitCode() + } + } + } + return code +} diff --git a/vendor/github.com/urfave/cli/v2/fish.go b/vendor/github.com/urfave/cli/v2/fish.go new file mode 100644 index 000000000..588e070ea --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/fish.go @@ -0,0 +1,196 @@ +package cli + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/template" +) + +// ToFishCompletion creates a fish completion string for the `*App` +// The function errors if either parsing or writing of the string fails. 
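Before the fish-completion generator defined below, one aside on the errors.go content just above: `Exit` is the intended way for an action to choose the process exit code, since the default `HandleExitCoder` prints the message and calls `OsExiter`. A hedged sketch:

```go
package main

import (
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "count",
		Action: func(c *cli.Context) error {
			if c.NArg() == 0 {
				// Printed to ErrWriter, then the process exits with status 2.
				return cli.Exit("no input files given", 2)
			}
			return nil
		},
	}
	_ = app.Run(os.Args)
}
```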
+func (a *App) ToFishCompletion() (string, error) { + var w bytes.Buffer + if err := a.writeFishCompletionTemplate(&w); err != nil { + return "", err + } + return w.String(), nil +} + +type fishCompletionTemplate struct { + App *App + Completions []string + AllCommands []string +} + +func (a *App) writeFishCompletionTemplate(w io.Writer) error { + const name = "cli" + t, err := template.New(name).Parse(FishCompletionTemplate) + if err != nil { + return err + } + allCommands := []string{} + + // Add global flags + completions := a.prepareFishFlags(a.VisibleFlags(), allCommands) + + // Add help flag + if !a.HideHelp { + completions = append( + completions, + a.prepareFishFlags([]Flag{HelpFlag}, allCommands)..., + ) + } + + // Add version flag + if !a.HideVersion { + completions = append( + completions, + a.prepareFishFlags([]Flag{VersionFlag}, allCommands)..., + ) + } + + // Add commands and their flags + completions = append( + completions, + a.prepareFishCommands(a.VisibleCommands(), &allCommands, []string{})..., + ) + + return t.ExecuteTemplate(w, name, &fishCompletionTemplate{ + App: a, + Completions: completions, + AllCommands: allCommands, + }) +} + +func (a *App) prepareFishCommands(commands []*Command, allCommands *[]string, previousCommands []string) []string { + completions := []string{} + for _, command := range commands { + if command.Hidden { + continue + } + + var completion strings.Builder + completion.WriteString(fmt.Sprintf( + "complete -r -c %s -n '%s' -a '%s'", + a.Name, + a.fishSubcommandHelper(previousCommands), + strings.Join(command.Names(), " "), + )) + + if command.Usage != "" { + completion.WriteString(fmt.Sprintf(" -d '%s'", + escapeSingleQuotes(command.Usage))) + } + + if !command.HideHelp { + completions = append( + completions, + a.prepareFishFlags([]Flag{HelpFlag}, command.Names())..., + ) + } + + *allCommands = append(*allCommands, command.Names()...) 
+ completions = append(completions, completion.String()) + completions = append( + completions, + a.prepareFishFlags(command.Flags, command.Names())..., + ) + + // recursevly iterate subcommands + if len(command.Subcommands) > 0 { + completions = append( + completions, + a.prepareFishCommands( + command.Subcommands, allCommands, command.Names(), + )..., + ) + } + } + + return completions +} + +func (a *App) prepareFishFlags(flags []Flag, previousCommands []string) []string { + completions := []string{} + for _, f := range flags { + flag, ok := f.(DocGenerationFlag) + if !ok { + continue + } + + completion := &strings.Builder{} + completion.WriteString(fmt.Sprintf( + "complete -c %s -n '%s'", + a.Name, + a.fishSubcommandHelper(previousCommands), + )) + + fishAddFileFlag(f, completion) + + for idx, opt := range flag.Names() { + if idx == 0 { + completion.WriteString(fmt.Sprintf( + " -l %s", strings.TrimSpace(opt), + )) + } else { + completion.WriteString(fmt.Sprintf( + " -s %s", strings.TrimSpace(opt), + )) + + } + } + + if flag.TakesValue() { + completion.WriteString(" -r") + } + + if flag.GetUsage() != "" { + completion.WriteString(fmt.Sprintf(" -d '%s'", + escapeSingleQuotes(flag.GetUsage()))) + } + + completions = append(completions, completion.String()) + } + + return completions +} + +func fishAddFileFlag(flag Flag, completion *strings.Builder) { + switch f := flag.(type) { + case *GenericFlag: + if f.TakesFile { + return + } + case *StringFlag: + if f.TakesFile { + return + } + case *StringSliceFlag: + if f.TakesFile { + return + } + case *PathFlag: + if f.TakesFile { + return + } + } + completion.WriteString(" -f") +} + +func (a *App) fishSubcommandHelper(allCommands []string) string { + fishHelper := fmt.Sprintf("__fish_%s_no_subcommand", a.Name) + if len(allCommands) > 0 { + fishHelper = fmt.Sprintf( + "__fish_seen_subcommand_from %s", + strings.Join(allCommands, " "), + ) + } + return fishHelper + +} + +func escapeSingleQuotes(input string) string { + return strings.Replace(input, `'`, `\'`, -1) +} diff --git a/vendor/github.com/urfave/cli/v2/flag.go b/vendor/github.com/urfave/cli/v2/flag.go new file mode 100644 index 000000000..ad97c2d05 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag.go @@ -0,0 +1,388 @@ +package cli + +import ( + "flag" + "fmt" + "io/ioutil" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "syscall" + "time" +) + +const defaultPlaceholder = "value" + +var ( + slPfx = fmt.Sprintf("sl:::%d:::", time.Now().UTC().UnixNano()) + + commaWhitespace = regexp.MustCompile("[, ]+.*") +) + +// BashCompletionFlag enables bash-completion for all commands and subcommands +var BashCompletionFlag Flag = &BoolFlag{ + Name: "generate-bash-completion", + Hidden: true, +} + +// VersionFlag prints the version for the application +var VersionFlag Flag = &BoolFlag{ + Name: "version", + Aliases: []string{"v"}, + Usage: "print the version", +} + +// HelpFlag prints the help for all commands and subcommands. +// Set to nil to disable the flag. The subcommand +// will still be added unless HideHelp or HideHelpCommand is set to true. +var HelpFlag Flag = &BoolFlag{ + Name: "help", + Aliases: []string{"h"}, + Usage: "show help", +} + +// FlagStringer converts a flag definition to a string. This is used by help +// to display a flag. 
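Closing out fish.go above: the generator is driven entirely from the app definition, so a short hedged sketch covers its use (the output path is illustrative; fish sources scripts from `~/.config/fish/completions/`):

```go
package main

import (
	"io/ioutil"
	"log"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{Name: "tool", Usage: "demo"}

	script, err := app.ToFishCompletion()
	if err != nil {
		log.Fatal(err)
	}
	// Write the script somewhere fish will source it.
	if err := ioutil.WriteFile("tool.fish", []byte(script), 0644); err != nil {
		log.Fatal(err)
	}
}
```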
+var FlagStringer FlagStringFunc = stringifyFlag
+
+// Serializer is used to circumvent the limitations of flag.FlagSet.Set
+type Serializer interface {
+ Serialize() string
+}
+
+// FlagNamePrefixer converts a full flag name and its placeholder into the help
+// message flag prefix. This is used by the default FlagStringer.
+var FlagNamePrefixer FlagNamePrefixFunc = prefixedNames
+
+// FlagEnvHinter annotates flag help message with the environment variable
+// details. This is used by the default FlagStringer.
+var FlagEnvHinter FlagEnvHintFunc = withEnvHint
+
+// FlagFileHinter annotates flag help message with the file path
+// details. This is used by the default FlagStringer.
+var FlagFileHinter FlagFileHintFunc = withFileHint
+
+// FlagsByName is a slice of Flag.
+type FlagsByName []Flag
+
+func (f FlagsByName) Len() int {
+ return len(f)
+}
+
+func (f FlagsByName) Less(i, j int) bool {
+ if len(f[j].Names()) == 0 {
+ return false
+ } else if len(f[i].Names()) == 0 {
+ return true
+ }
+ return lexicographicLess(f[i].Names()[0], f[j].Names()[0])
+}
+
+func (f FlagsByName) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+// Flag is a common interface related to parsing flags in cli.
+// For more advanced flag parsing techniques, it is recommended that
+// this interface be implemented.
+type Flag interface {
+ fmt.Stringer
+ // Apply Flag settings to the given flag set
+ Apply(*flag.FlagSet) error
+ Names() []string
+ IsSet() bool
+}
+
+// RequiredFlag is an interface that allows us to mark flags as required;
+// it allows required flags to remain backwards compatible with the Flag interface
+type RequiredFlag interface {
+ Flag
+
+ IsRequired() bool
+}
+
+// DocGenerationFlag is an interface that allows documentation generation for the flag
+type DocGenerationFlag interface {
+ Flag
+
+ // TakesValue returns true if the flag takes a value, otherwise false
+ TakesValue() bool
+
+ // GetUsage returns the usage string for the flag
+ GetUsage() string
+
+ // GetValue returns the flags value as string representation and an empty
+ // string if the flag takes no value at all.
+ GetValue() string
+}
+
+func flagSet(name string, flags []Flag) (*flag.FlagSet, error) {
+ set := flag.NewFlagSet(name, flag.ContinueOnError)
+
+ for _, f := range flags {
+ if err := f.Apply(set); err != nil {
+ return nil, err
+ }
+ }
+ set.SetOutput(ioutil.Discard)
+ return set, nil
+}
+
+func visibleFlags(fl []Flag) []Flag {
+ var visible []Flag
+ for _, f := range fl {
+ field := flagValue(f).FieldByName("Hidden")
+ if !field.IsValid() || !field.Bool() {
+ visible = append(visible, f)
+ }
+ }
+ return visible
+}
+
+func prefixFor(name string) (prefix string) {
+ if len(name) == 1 {
+ prefix = "-"
+ } else {
+ prefix = "--"
+ }
+
+ return
+}
+
+// Returns the placeholder, if any, and the unquoted usage string.
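+// For example, "Load configuration from `FILE`" yields ("FILE", "Load configuration from FILE").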
+func unquoteUsage(usage string) (string, string) { + for i := 0; i < len(usage); i++ { + if usage[i] == '`' { + for j := i + 1; j < len(usage); j++ { + if usage[j] == '`' { + name := usage[i+1 : j] + usage = usage[:i] + name + usage[j+1:] + return name, usage + } + } + break + } + } + return "", usage +} + +func prefixedNames(names []string, placeholder string) string { + var prefixed string + for i, name := range names { + if name == "" { + continue + } + + prefixed += prefixFor(name) + name + if placeholder != "" { + prefixed += " " + placeholder + } + if i < len(names)-1 { + prefixed += ", " + } + } + return prefixed +} + +func withEnvHint(envVars []string, str string) string { + envText := "" + if envVars != nil && len(envVars) > 0 { + prefix := "$" + suffix := "" + sep := ", $" + if runtime.GOOS == "windows" { + prefix = "%" + suffix = "%" + sep = "%, %" + } + + envText = fmt.Sprintf(" [%s%s%s]", prefix, strings.Join(envVars, sep), suffix) + } + return str + envText +} + +func flagNames(name string, aliases []string) []string { + var ret []string + + for _, part := range append([]string{name}, aliases...) { + // v1 -> v2 migration warning zone: + // Strip off anything after the first found comma or space, which + // *hopefully* makes it a tiny bit more obvious that unexpected behavior is + // caused by using the v1 form of stringly typed "Name". + ret = append(ret, commaWhitespace.ReplaceAllString(part, "")) + } + + return ret +} + +func flagStringSliceField(f Flag, name string) []string { + fv := flagValue(f) + field := fv.FieldByName(name) + + if field.IsValid() { + return field.Interface().([]string) + } + + return []string{} +} + +func withFileHint(filePath, str string) string { + fileText := "" + if filePath != "" { + fileText = fmt.Sprintf(" [%s]", filePath) + } + return str + fileText +} + +func flagValue(f Flag) reflect.Value { + fv := reflect.ValueOf(f) + for fv.Kind() == reflect.Ptr { + fv = reflect.Indirect(fv) + } + return fv +} + +func formatDefault(format string) string { + return " (default: " + format + ")" +} + +func stringifyFlag(f Flag) string { + fv := flagValue(f) + + switch f := f.(type) { + case *IntSliceFlag: + return withEnvHint(flagStringSliceField(f, "EnvVars"), + stringifyIntSliceFlag(f)) + case *Int64SliceFlag: + return withEnvHint(flagStringSliceField(f, "EnvVars"), + stringifyInt64SliceFlag(f)) + case *Float64SliceFlag: + return withEnvHint(flagStringSliceField(f, "EnvVars"), + stringifyFloat64SliceFlag(f)) + case *StringSliceFlag: + return withEnvHint(flagStringSliceField(f, "EnvVars"), + stringifyStringSliceFlag(f)) + } + + placeholder, usage := unquoteUsage(fv.FieldByName("Usage").String()) + + needsPlaceholder := false + defaultValueString := "" + val := fv.FieldByName("Value") + if val.IsValid() { + needsPlaceholder = val.Kind() != reflect.Bool + defaultValueString = fmt.Sprintf(formatDefault("%v"), val.Interface()) + + if val.Kind() == reflect.String && val.String() != "" { + defaultValueString = fmt.Sprintf(formatDefault("%q"), val.String()) + } + } + + helpText := fv.FieldByName("DefaultText") + if helpText.IsValid() && helpText.String() != "" { + needsPlaceholder = val.Kind() != reflect.Bool + defaultValueString = fmt.Sprintf(formatDefault("%s"), helpText.String()) + } + + if defaultValueString == formatDefault("") { + defaultValueString = "" + } + + if needsPlaceholder && placeholder == "" { + placeholder = defaultPlaceholder + } + + usageWithDefault := strings.TrimSpace(usage + defaultValueString) + + return 
withEnvHint(flagStringSliceField(f, "EnvVars"), + fmt.Sprintf("%s\t%s", prefixedNames(f.Names(), placeholder), usageWithDefault)) +} + +func stringifyIntSliceFlag(f *IntSliceFlag) string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, strconv.Itoa(i)) + } + } + + return stringifySliceFlag(f.Usage, f.Names(), defaultVals) +} + +func stringifyInt64SliceFlag(f *Int64SliceFlag) string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, strconv.FormatInt(i, 10)) + } + } + + return stringifySliceFlag(f.Usage, f.Names(), defaultVals) +} + +func stringifyFloat64SliceFlag(f *Float64SliceFlag) string { + var defaultVals []string + + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, strings.TrimRight(strings.TrimRight(fmt.Sprintf("%f", i), "0"), ".")) + } + } + + return stringifySliceFlag(f.Usage, f.Names(), defaultVals) +} + +func stringifyStringSliceFlag(f *StringSliceFlag) string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, s := range f.Value.Value() { + if len(s) > 0 { + defaultVals = append(defaultVals, strconv.Quote(s)) + } + } + } + + return stringifySliceFlag(f.Usage, f.Names(), defaultVals) +} + +func stringifySliceFlag(usage string, names, defaultVals []string) string { + placeholder, usage := unquoteUsage(usage) + if placeholder == "" { + placeholder = defaultPlaceholder + } + + defaultVal := "" + if len(defaultVals) > 0 { + defaultVal = fmt.Sprintf(formatDefault("%s"), strings.Join(defaultVals, ", ")) + } + + usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultVal)) + return fmt.Sprintf("%s\t%s", prefixedNames(names, placeholder), usageWithDefault) +} + +func hasFlag(flags []Flag, fl Flag) bool { + for _, existing := range flags { + if fl == existing { + return true + } + } + + return false +} + +func flagFromEnvOrFile(envVars []string, filePath string) (val string, ok bool) { + for _, envVar := range envVars { + envVar = strings.TrimSpace(envVar) + if val, ok := syscall.Getenv(envVar); ok { + return val, true + } + } + for _, fileVar := range strings.Split(filePath, ",") { + if data, err := ioutil.ReadFile(fileVar); err == nil { + return string(data), true + } + } + return "", false +} diff --git a/vendor/github.com/urfave/cli/v2/flag_bool.go b/vendor/github.com/urfave/cli/v2/flag_bool.go new file mode 100644 index 000000000..bc9ea35d0 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_bool.go @@ -0,0 +1,106 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// BoolFlag is a flag with type bool +type BoolFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value bool + DefaultText string + Destination *bool + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *BoolFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *BoolFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *BoolFlag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *BoolFlag) IsRequired() bool { + return f.Required +} + +// 
TakesValue returns true if the flag takes a value, otherwise false
+func (f *BoolFlag) TakesValue() bool {
+ return false
+}
+
+// GetUsage returns the usage string for the flag
+func (f *BoolFlag) GetUsage() string {
+ return f.Usage
+}
+
+// GetValue returns the flags value as string representation and an empty
+// string if the flag takes no value at all.
+func (f *BoolFlag) GetValue() string {
+ return ""
+}
+
+// Apply populates the flag given the flag set and environment
+func (f *BoolFlag) Apply(set *flag.FlagSet) error {
+ if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
+ if val != "" {
+ valBool, err := strconv.ParseBool(val)
+
+ if err != nil {
+ return fmt.Errorf("could not parse %q as bool value for flag %s: %s", val, f.Name, err)
+ }
+
+ f.Value = valBool
+ f.HasBeenSet = true
+ }
+ }
+
+ for _, name := range f.Names() {
+ if f.Destination != nil {
+ set.BoolVar(f.Destination, name, f.Value, f.Usage)
+ continue
+ }
+ set.Bool(name, f.Value, f.Usage)
+ }
+
+ return nil
+}
+
+// Bool looks up the value of a local BoolFlag, returns
+// false if not found
+func (c *Context) Bool(name string) bool {
+ if fs := lookupFlagSet(name, c); fs != nil {
+ return lookupBool(name, fs)
+ }
+ return false
+}
+
+func lookupBool(name string, set *flag.FlagSet) bool {
+ f := set.Lookup(name)
+ if f != nil {
+ parsed, err := strconv.ParseBool(f.Value.String())
+ if err != nil {
+ return false
+ }
+ return parsed
+ }
+ return false
+}
diff --git a/vendor/github.com/urfave/cli/v2/flag_duration.go b/vendor/github.com/urfave/cli/v2/flag_duration.go
new file mode 100644
index 000000000..22a2e6720
--- /dev/null
+++ b/vendor/github.com/urfave/cli/v2/flag_duration.go
@@ -0,0 +1,105 @@
+package cli
+
+import (
+ "flag"
+ "fmt"
+ "time"
+)
+
+// DurationFlag is a flag with type time.Duration (see https://golang.org/pkg/time/#ParseDuration)
+type DurationFlag struct {
+ Name string
+ Aliases []string
+ Usage string
+ EnvVars []string
+ FilePath string
+ Required bool
+ Hidden bool
+ Value time.Duration
+ DefaultText string
+ Destination *time.Duration
+ HasBeenSet bool
+}
+
+// IsSet returns whether or not the flag has been set through env or file
+func (f *DurationFlag) IsSet() bool {
+ return f.HasBeenSet
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f *DurationFlag) String() string {
+ return FlagStringer(f)
+}
+
+// Names returns the names of the flag
+func (f *DurationFlag) Names() []string {
+ return flagNames(f.Name, f.Aliases)
+}
+
+// IsRequired returns whether or not the flag is required
+func (f *DurationFlag) IsRequired() bool {
+ return f.Required
+}
+
+// TakesValue returns true if the flag takes a value, otherwise false
+func (f *DurationFlag) TakesValue() bool {
+ return true
+}
+
+// GetUsage returns the usage string for the flag
+func (f *DurationFlag) GetUsage() string {
+ return f.Usage
+}
+
+// GetValue returns the flags value as string representation and an empty
+// string if the flag takes no value at all.
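+// For DurationFlag the value renders via time.Duration.String, e.g. 90*time.Minute as "1h30m0s".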
+func (f *DurationFlag) GetValue() string { + return f.Value.String() +} + +// Apply populates the flag given the flag set and environment +func (f *DurationFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if val != "" { + valDuration, err := time.ParseDuration(val) + + if err != nil { + return fmt.Errorf("could not parse %q as duration value for flag %s: %s", val, f.Name, err) + } + + f.Value = valDuration + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.DurationVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.Duration(name, f.Value, f.Usage) + } + return nil +} + +// Duration looks up the value of a local DurationFlag, returns +// 0 if not found +func (c *Context) Duration(name string) time.Duration { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupDuration(name, fs) + } + return 0 +} + +func lookupDuration(name string, set *flag.FlagSet) time.Duration { + f := set.Lookup(name) + if f != nil { + parsed, err := time.ParseDuration(f.Value.String()) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_float64.go b/vendor/github.com/urfave/cli/v2/flag_float64.go new file mode 100644 index 000000000..91c778c87 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_float64.go @@ -0,0 +1,106 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// Float64Flag is a flag with type float64 +type Float64Flag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value float64 + DefaultText string + Destination *float64 + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Float64Flag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *Float64Flag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *Float64Flag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Float64Flag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *Float64Flag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Float64Flag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
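+// For Float64Flag the value is formatted with %f, so 1.5 renders as "1.500000".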
+func (f *Float64Flag) GetValue() string { + return fmt.Sprintf("%f", f.Value) +} + +// Apply populates the flag given the flag set and environment +func (f *Float64Flag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if val != "" { + valFloat, err := strconv.ParseFloat(val, 10) + + if err != nil { + return fmt.Errorf("could not parse %q as float64 value for flag %s: %s", val, f.Name, err) + } + + f.Value = valFloat + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.Float64Var(f.Destination, name, f.Value, f.Usage) + continue + } + set.Float64(name, f.Value, f.Usage) + } + + return nil +} + +// Float64 looks up the value of a local Float64Flag, returns +// 0 if not found +func (c *Context) Float64(name string) float64 { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupFloat64(name, fs) + } + return 0 +} + +func lookupFloat64(name string, set *flag.FlagSet) float64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseFloat(f.Value.String(), 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_float64_slice.go b/vendor/github.com/urfave/cli/v2/flag_float64_slice.go new file mode 100644 index 000000000..706ee6cd4 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_float64_slice.go @@ -0,0 +1,163 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strconv" + "strings" +) + +// Float64Slice wraps []float64 to satisfy flag.Value +type Float64Slice struct { + slice []float64 + hasBeenSet bool +} + +// NewFloat64Slice makes a *Float64Slice with default values +func NewFloat64Slice(defaults ...float64) *Float64Slice { + return &Float64Slice{slice: append([]float64{}, defaults...)} +} + +// Set parses the value into a float64 and appends it to the list of values +func (f *Float64Slice) Set(value string) error { + if !f.hasBeenSet { + f.slice = []float64{} + f.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &f.slice) + f.hasBeenSet = true + return nil + } + + tmp, err := strconv.ParseFloat(value, 64) + if err != nil { + return err + } + + f.slice = append(f.slice, tmp) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *Float64Slice) String() string { + return fmt.Sprintf("%#v", f.slice) +} + +// Serialize allows Float64Slice to fulfill Serializer +func (f *Float64Slice) Serialize() string { + jsonBytes, _ := json.Marshal(f.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of float64s set by this flag +func (f *Float64Slice) Value() []float64 { + return f.slice +} + +// Get returns the slice of float64s set by this flag +func (f *Float64Slice) Get() interface{} { + return *f +} + +// Float64SliceFlag is a flag with type *Float64Slice +type Float64SliceFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value *Float64Slice + DefaultText string + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Float64SliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *Float64SliceFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names 
of the flag +func (f *Float64SliceFlag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Float64SliceFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true if the flag takes a value, otherwise false +func (f *Float64SliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Float64SliceFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *Float64SliceFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// Apply populates the flag given the flag set and environment +func (f *Float64SliceFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if val != "" { + f.Value = &Float64Slice{} + + for _, s := range strings.Split(val, ",") { + if err := f.Value.Set(strings.TrimSpace(s)); err != nil { + return fmt.Errorf("could not parse %q as float64 slice value for flag %s: %s", f.Value, f.Name, err) + } + } + + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Value == nil { + f.Value = &Float64Slice{} + } + set.Var(f.Value, name, f.Usage) + } + + return nil +} + +// Float64Slice looks up the value of a local Float64SliceFlag, returns +// nil if not found +func (c *Context) Float64Slice(name string) []float64 { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupFloat64Slice(name, fs) + } + return nil +} + +func lookupFloat64Slice(name string, set *flag.FlagSet) []float64 { + f := set.Lookup(name) + if f != nil { + if slice, ok := f.Value.(*Float64Slice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_generic.go b/vendor/github.com/urfave/cli/v2/flag_generic.go new file mode 100644 index 000000000..b0c8ff44d --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_generic.go @@ -0,0 +1,108 @@ +package cli + +import ( + "flag" + "fmt" +) + +// Generic is a generic parseable type identified by a specific flag +type Generic interface { + Set(value string) error + String() string +} + +// GenericFlag is a flag with type Generic +type GenericFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + TakesFile bool + Value Generic + DefaultText string + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *GenericFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *GenericFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *GenericFlag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *GenericFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *GenericFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *GenericFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
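+// For GenericFlag this delegates to the user-supplied Value implementation,
+// or returns "" when Value is nil.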
+func (f *GenericFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// Apply takes the flagset and calls Set on the generic flag with the value +// provided by the user for parsing by the flag +func (f GenericFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if val != "" { + if err := f.Value.Set(val); err != nil { + return fmt.Errorf("could not parse %q as value for flag %s: %s", val, f.Name, err) + } + + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + set.Var(f.Value, name, f.Usage) + } + + return nil +} + +// Generic looks up the value of a local GenericFlag, returns +// nil if not found +func (c *Context) Generic(name string) interface{} { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupGeneric(name, fs) + } + return nil +} + +func lookupGeneric(name string, set *flag.FlagSet) interface{} { + f := set.Lookup(name) + if f != nil { + parsed, err := f.Value, error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_int.go b/vendor/github.com/urfave/cli/v2/flag_int.go new file mode 100644 index 000000000..ac39d4a9e --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_int.go @@ -0,0 +1,106 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// IntFlag is a flag with type int +type IntFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value int + DefaultText string + Destination *int + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *IntFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *IntFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *IntFlag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *IntFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *IntFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *IntFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
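+// For IntFlag the value renders in base 10; note that Apply parses environment
+// values with base 0, so "42", "0x2a" and "052" are all accepted.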
+func (f *IntFlag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// Apply populates the flag given the flag set and environment +func (f *IntFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if val != "" { + valInt, err := strconv.ParseInt(val, 0, 64) + + if err != nil { + return fmt.Errorf("could not parse %q as int value for flag %s: %s", val, f.Name, err) + } + + f.Value = int(valInt) + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.IntVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.Int(name, f.Value, f.Usage) + } + + return nil +} + +// Int looks up the value of a local IntFlag, returns +// 0 if not found +func (c *Context) Int(name string) int { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupInt(name, fs) + } + return 0 +} + +func lookupInt(name string, set *flag.FlagSet) int { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return int(parsed) + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_int64.go b/vendor/github.com/urfave/cli/v2/flag_int64.go new file mode 100644 index 000000000..e09991269 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_int64.go @@ -0,0 +1,105 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// Int64Flag is a flag with type int64 +type Int64Flag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value int64 + DefaultText string + Destination *int64 + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Int64Flag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *Int64Flag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *Int64Flag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Int64Flag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *Int64Flag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Int64Flag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
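+// For Int64Flag the full 64-bit signed range is preserved, e.g. 1<<40 renders as "1099511627776".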
+func (f *Int64Flag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// Apply populates the flag given the flag set and environment +func (f *Int64Flag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if val != "" { + valInt, err := strconv.ParseInt(val, 0, 64) + + if err != nil { + return fmt.Errorf("could not parse %q as int value for flag %s: %s", val, f.Name, err) + } + + f.Value = valInt + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.Int64Var(f.Destination, name, f.Value, f.Usage) + continue + } + set.Int64(name, f.Value, f.Usage) + } + return nil +} + +// Int64 looks up the value of a local Int64Flag, returns +// 0 if not found +func (c *Context) Int64(name string) int64 { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupInt64(name, fs) + } + return 0 +} + +func lookupInt64(name string, set *flag.FlagSet) int64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_int64_slice.go b/vendor/github.com/urfave/cli/v2/flag_int64_slice.go new file mode 100644 index 000000000..6c7fd9376 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_int64_slice.go @@ -0,0 +1,159 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strconv" + "strings" +) + +// Int64Slice wraps []int64 to satisfy flag.Value +type Int64Slice struct { + slice []int64 + hasBeenSet bool +} + +// NewInt64Slice makes an *Int64Slice with default values +func NewInt64Slice(defaults ...int64) *Int64Slice { + return &Int64Slice{slice: append([]int64{}, defaults...)} +} + +// Set parses the value into an integer and appends it to the list of values +func (i *Int64Slice) Set(value string) error { + if !i.hasBeenSet { + i.slice = []int64{} + i.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &i.slice) + i.hasBeenSet = true + return nil + } + + tmp, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return err + } + + i.slice = append(i.slice, tmp) + + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (i *Int64Slice) String() string { + return fmt.Sprintf("%#v", i.slice) +} + +// Serialize allows Int64Slice to fulfill Serializer +func (i *Int64Slice) Serialize() string { + jsonBytes, _ := json.Marshal(i.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of ints set by this flag +func (i *Int64Slice) Value() []int64 { + return i.slice +} + +// Get returns the slice of ints set by this flag +func (i *Int64Slice) Get() interface{} { + return *i +} + +// Int64SliceFlag is a flag with type *Int64Slice +type Int64SliceFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value *Int64Slice + DefaultText string + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Int64SliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *Int64SliceFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *Int64SliceFlag) Names() []string { + return flagNames(f.Name, 
f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Int64SliceFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *Int64SliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f Int64SliceFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *Int64SliceFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// Apply populates the flag given the flag set and environment +func (f *Int64SliceFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + f.Value = &Int64Slice{} + + for _, s := range strings.Split(val, ",") { + if err := f.Value.Set(strings.TrimSpace(s)); err != nil { + return fmt.Errorf("could not parse %q as int64 slice value for flag %s: %s", val, f.Name, err) + } + } + + f.HasBeenSet = true + } + + for _, name := range f.Names() { + if f.Value == nil { + f.Value = &Int64Slice{} + } + set.Var(f.Value, name, f.Usage) + } + + return nil +} + +// Int64Slice looks up the value of a local Int64SliceFlag, returns +// nil if not found +func (c *Context) Int64Slice(name string) []int64 { + return lookupInt64Slice(name, c.flagSet) +} + +func lookupInt64Slice(name string, set *flag.FlagSet) []int64 { + f := set.Lookup(name) + if f != nil { + if slice, ok := f.Value.(*Int64Slice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_int_slice.go b/vendor/github.com/urfave/cli/v2/flag_int_slice.go new file mode 100644 index 000000000..4e0afc021 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_int_slice.go @@ -0,0 +1,173 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strconv" + "strings" +) + +// IntSlice wraps []int to satisfy flag.Value +type IntSlice struct { + slice []int + hasBeenSet bool +} + +// NewIntSlice makes an *IntSlice with default values +func NewIntSlice(defaults ...int) *IntSlice { + return &IntSlice{slice: append([]int{}, defaults...)} +} + +// TODO: Consistently have specific Set function for Int64 and Float64 ? 
+// SetInt directly adds an integer to the list of values +func (i *IntSlice) SetInt(value int) { + if !i.hasBeenSet { + i.slice = []int{} + i.hasBeenSet = true + } + + i.slice = append(i.slice, value) +} + +// Set parses the value into an integer and appends it to the list of values +func (i *IntSlice) Set(value string) error { + if !i.hasBeenSet { + i.slice = []int{} + i.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &i.slice) + i.hasBeenSet = true + return nil + } + + tmp, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return err + } + + i.slice = append(i.slice, int(tmp)) + + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (i *IntSlice) String() string { + return fmt.Sprintf("%#v", i.slice) +} + +// Serialize allows IntSlice to fulfill Serializer +func (i *IntSlice) Serialize() string { + jsonBytes, _ := json.Marshal(i.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of ints set by this flag +func (i *IntSlice) Value() []int { + return i.slice +} + +// Get returns the slice of ints set by this flag +func (i *IntSlice) Get() interface{} { + return *i +} + +// IntSliceFlag is a flag with type *IntSlice +type IntSliceFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value *IntSlice + DefaultText string + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *IntSliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *IntSliceFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *IntSliceFlag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *IntSliceFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *IntSliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f IntSliceFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
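+// For IntSliceFlag this is the Go-syntax form of the backing slice,
+// e.g. "[]int{1, 2}" (see IntSlice.String above).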
+func (f *IntSliceFlag) GetValue() string {
+ if f.Value != nil {
+ return f.Value.String()
+ }
+ return ""
+}
+
+// Apply populates the flag given the flag set and environment
+func (f *IntSliceFlag) Apply(set *flag.FlagSet) error {
+ if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
+ f.Value = &IntSlice{}
+
+ for _, s := range strings.Split(val, ",") {
+ if err := f.Value.Set(strings.TrimSpace(s)); err != nil {
+ return fmt.Errorf("could not parse %q as int slice value for flag %s: %s", val, f.Name, err)
+ }
+ }
+
+ f.HasBeenSet = true
+ }
+
+ for _, name := range f.Names() {
+ if f.Value == nil {
+ f.Value = &IntSlice{}
+ }
+ set.Var(f.Value, name, f.Usage)
+ }
+
+ return nil
+}
+
+// IntSlice looks up the value of a local IntSliceFlag, returns
+// nil if not found
+func (c *Context) IntSlice(name string) []int {
+ if fs := lookupFlagSet(name, c); fs != nil {
+ return lookupIntSlice(name, fs)
+ }
+ return nil
+}
+
+func lookupIntSlice(name string, set *flag.FlagSet) []int {
+ f := set.Lookup(name)
+ if f != nil {
+ if slice, ok := f.Value.(*IntSlice); ok {
+ return slice.Value()
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/urfave/cli/v2/flag_path.go b/vendor/github.com/urfave/cli/v2/flag_path.go
new file mode 100644
index 000000000..8070dc4b0
--- /dev/null
+++ b/vendor/github.com/urfave/cli/v2/flag_path.go
@@ -0,0 +1,95 @@
+package cli
+
+import "flag"
+
+// PathFlag is a flag with type string, intended for file-system paths
+type PathFlag struct {
+ Name string
+ Aliases []string
+ Usage string
+ EnvVars []string
+ FilePath string
+ Required bool
+ Hidden bool
+ TakesFile bool
+ Value string
+ DefaultText string
+ Destination *string
+ HasBeenSet bool
+}
+
+// IsSet returns whether or not the flag has been set through env or file
+func (f *PathFlag) IsSet() bool {
+ return f.HasBeenSet
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f *PathFlag) String() string {
+ return FlagStringer(f)
+}
+
+// Names returns the names of the flag
+func (f *PathFlag) Names() []string {
+ return flagNames(f.Name, f.Aliases)
+}
+
+// IsRequired returns whether or not the flag is required
+func (f *PathFlag) IsRequired() bool {
+ return f.Required
+}
+
+// TakesValue returns true if the flag takes a value, otherwise false
+func (f *PathFlag) TakesValue() bool {
+ return true
+}
+
+// GetUsage returns the usage string for the flag
+func (f *PathFlag) GetUsage() string {
+ return f.Usage
+}
+
+// GetValue returns the flags value as string representation and an empty
+// string if the flag takes no value at all.
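+// For PathFlag the raw string is returned unchanged. The TakesFile field above
+// is what fishAddFileFlag consults to keep file-name completion enabled.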
+func (f *PathFlag) GetValue() string { + return f.Value +} + +// Apply populates the flag given the flag set and environment +func (f *PathFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + f.Value = val + f.HasBeenSet = true + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.StringVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.String(name, f.Value, f.Usage) + } + + return nil +} + +// Path looks up the value of a local PathFlag, returns +// "" if not found +func (c *Context) Path(name string) string { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupPath(name, fs) + } + + return "" +} + +func lookupPath(name string, set *flag.FlagSet) string { + f := set.Lookup(name) + if f != nil { + parsed, err := f.Value.String(), error(nil) + if err != nil { + return "" + } + return parsed + } + return "" +} diff --git a/vendor/github.com/urfave/cli/v2/flag_string.go b/vendor/github.com/urfave/cli/v2/flag_string.go new file mode 100644 index 000000000..400bb532e --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_string.go @@ -0,0 +1,95 @@ +package cli + +import "flag" + +// StringFlag is a flag with type string +type StringFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + TakesFile bool + Value string + DefaultText string + Destination *string + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *StringFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *StringFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *StringFlag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *StringFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *StringFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *StringFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
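+// For StringFlag the raw string is returned; unlike the numeric flags, Apply
+// treats an empty environment value as an explicit setting.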
+func (f *StringFlag) GetValue() string { + return f.Value +} + +// Apply populates the flag given the flag set and environment +func (f *StringFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + f.Value = val + f.HasBeenSet = true + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.StringVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.String(name, f.Value, f.Usage) + } + + return nil +} + +// String looks up the value of a local StringFlag, returns +// "" if not found +func (c *Context) String(name string) string { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupString(name, fs) + } + return "" +} + +func lookupString(name string, set *flag.FlagSet) string { + f := set.Lookup(name) + if f != nil { + parsed, err := f.Value.String(), error(nil) + if err != nil { + return "" + } + return parsed + } + return "" +} diff --git a/vendor/github.com/urfave/cli/v2/flag_string_slice.go b/vendor/github.com/urfave/cli/v2/flag_string_slice.go new file mode 100644 index 000000000..35497032c --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_string_slice.go @@ -0,0 +1,180 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strings" +) + +// StringSlice wraps a []string to satisfy flag.Value +type StringSlice struct { + slice []string + hasBeenSet bool +} + +// NewStringSlice creates a *StringSlice with default values +func NewStringSlice(defaults ...string) *StringSlice { + return &StringSlice{slice: append([]string{}, defaults...)} +} + +// Set appends the string value to the list of values +func (s *StringSlice) Set(value string) error { + if !s.hasBeenSet { + s.slice = []string{} + s.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &s.slice) + s.hasBeenSet = true + return nil + } + + s.slice = append(s.slice, value) + + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (s *StringSlice) String() string { + return fmt.Sprintf("%s", s.slice) +} + +// Serialize allows StringSlice to fulfill Serializer +func (s *StringSlice) Serialize() string { + jsonBytes, _ := json.Marshal(s.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of strings set by this flag +func (s *StringSlice) Value() []string { + return s.slice +} + +// Get returns the slice of strings set by this flag +func (s *StringSlice) Get() interface{} { + return *s +} + +// StringSliceFlag is a flag with type *StringSlice +type StringSliceFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + TakesFile bool + Value *StringSlice + DefaultText string + HasBeenSet bool + Destination *StringSlice +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *StringSliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *StringSliceFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *StringSliceFlag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *StringSliceFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f 
*StringSliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *StringSliceFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *StringSliceFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// Apply populates the flag given the flag set and environment +func (f *StringSliceFlag) Apply(set *flag.FlagSet) error { + + if f.Destination != nil && f.Value != nil { + f.Destination.slice = make([]string, len(f.Value.slice)) + copy(f.Destination.slice, f.Value.slice) + + } + + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if f.Value == nil { + f.Value = &StringSlice{} + } + destination := f.Value + if f.Destination != nil { + destination = f.Destination + } + + for _, s := range strings.Split(val, ",") { + if err := destination.Set(strings.TrimSpace(s)); err != nil { + return fmt.Errorf("could not parse %q as string value for flag %s: %s", val, f.Name, err) + } + } + + // Set this to false so that we reset the slice if we then set values from + // flags that have already been set by the environment. + destination.hasBeenSet = false + f.HasBeenSet = true + } + + for _, name := range f.Names() { + if f.Value == nil { + f.Value = &StringSlice{} + } + + if f.Destination != nil { + set.Var(f.Destination, name, f.Usage) + continue + } + + set.Var(f.Value, name, f.Usage) + } + + return nil +} + +// StringSlice looks up the value of a local StringSliceFlag, returns +// nil if not found +func (c *Context) StringSlice(name string) []string { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupStringSlice(name, fs) + } + return nil +} + +func lookupStringSlice(name string, set *flag.FlagSet) []string { + f := set.Lookup(name) + if f != nil { + if slice, ok := f.Value.(*StringSlice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_timestamp.go b/vendor/github.com/urfave/cli/v2/flag_timestamp.go new file mode 100644 index 000000000..0382a6b9d --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_timestamp.go @@ -0,0 +1,154 @@ +package cli + +import ( + "flag" + "fmt" + "time" +) + +// Timestamp wrap to satisfy golang's flag interface. 
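+// A parse layout must be provided (via SetLayout or TimestampFlag.Layout)
+// before Set is called.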
+type Timestamp struct {
+ timestamp *time.Time
+ hasBeenSet bool
+ layout string
+}
+
+// Timestamp constructor
+func NewTimestamp(timestamp time.Time) *Timestamp {
+ return &Timestamp{timestamp: &timestamp}
+}
+
+// Set the timestamp value directly
+func (t *Timestamp) SetTimestamp(value time.Time) {
+ if !t.hasBeenSet {
+ t.timestamp = &value
+ t.hasBeenSet = true
+ }
+}
+
+// Set the timestamp string layout for future parsing
+func (t *Timestamp) SetLayout(layout string) {
+ t.layout = layout
+}
+
+// Parses the string value to timestamp
+func (t *Timestamp) Set(value string) error {
+ timestamp, err := time.Parse(t.layout, value)
+ if err != nil {
+ return err
+ }
+
+ t.timestamp = &timestamp
+ t.hasBeenSet = true
+ return nil
+}
+
+// String returns a readable representation of this value (for usage defaults)
+func (t *Timestamp) String() string {
+ return fmt.Sprintf("%#v", t.timestamp)
+}
+
+// Value returns the timestamp value stored in the flag
+func (t *Timestamp) Value() *time.Time {
+ return t.timestamp
+}
+
+// Get returns the flag structure
+func (t *Timestamp) Get() interface{} {
+ return *t
+}
+
+// TimestampFlag is a flag with type time
+type TimestampFlag struct {
+ Name string
+ Aliases []string
+ Usage string
+ EnvVars []string
+ FilePath string
+ Required bool
+ Hidden bool
+ Layout string
+ Value *Timestamp
+ DefaultText string
+ HasBeenSet bool
+}
+
+// IsSet returns whether or not the flag has been set through env or file
+func (f *TimestampFlag) IsSet() bool {
+ return f.HasBeenSet
+}
+
+// String returns a readable representation of this value
+// (for usage defaults)
+func (f *TimestampFlag) String() string {
+ return FlagStringer(f)
+}
+
+// Names returns the names of the flag
+func (f *TimestampFlag) Names() []string {
+ return flagNames(f.Name, f.Aliases)
+}
+
+// IsRequired returns whether or not the flag is required
+func (f *TimestampFlag) IsRequired() bool {
+ return f.Required
+}
+
+// TakesValue returns true if the flag takes a value, otherwise false
+func (f *TimestampFlag) TakesValue() bool {
+ return true
+}
+
+// GetUsage returns the usage string for the flag
+func (f *TimestampFlag) GetUsage() string {
+ return f.Usage
+}
+
+// GetValue returns the flags value as string representation and an empty
+// string if the flag takes no value at all.
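+// For TimestampFlag this is time.Time.String of the parsed value,
+// e.g. "2020-01-01 00:00:00 +0000 UTC".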
+func (f *TimestampFlag) GetValue() string { + if f.Value != nil { + return f.Value.timestamp.String() + } + return "" +} + +// Apply populates the flag given the flag set and environment +func (f *TimestampFlag) Apply(set *flag.FlagSet) error { + if f.Layout == "" { + return fmt.Errorf("timestamp Layout is required") + } + if f.Value == nil { + f.Value = &Timestamp{} + } + f.Value.SetLayout(f.Layout) + + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if err := f.Value.Set(val); err != nil { + return fmt.Errorf("could not parse %q as timestamp value for flag %s: %s", val, f.Name, err) + } + f.HasBeenSet = true + } + + for _, name := range f.Names() { + set.Var(f.Value, name, f.Usage) + } + return nil +} + +// Timestamp gets the timestamp from a flag name +func (c *Context) Timestamp(name string) *time.Time { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupTimestamp(name, fs) + } + return nil +} + +// Fetches the timestamp value from the local timestampWrap +func lookupTimestamp(name string, set *flag.FlagSet) *time.Time { + f := set.Lookup(name) + if f != nil { + return (f.Value.(*Timestamp)).Value() + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_uint.go b/vendor/github.com/urfave/cli/v2/flag_uint.go new file mode 100644 index 000000000..2e5e76b0e --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_uint.go @@ -0,0 +1,105 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// UintFlag is a flag with type uint +type UintFlag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value uint + DefaultText string + Destination *uint + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *UintFlag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *UintFlag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *UintFlag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *UintFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *UintFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *UintFlag) GetUsage() string { + return f.Usage +} + +// Apply populates the flag given the flag set and environment +func (f *UintFlag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if val != "" { + valInt, err := strconv.ParseUint(val, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %q as uint value for flag %s: %s", val, f.Name, err) + } + + f.Value = uint(valInt) + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.UintVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.Uint(name, f.Value, f.Usage) + } + + return nil +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
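+// For UintFlag the value renders in base 10.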
+func (f *UintFlag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// Uint looks up the value of a local UintFlag, returns +// 0 if not found +func (c *Context) Uint(name string) uint { + if fs := lookupFlagSet(name, c); fs != nil { + return lookupUint(name, fs) + } + return 0 +} + +func lookupUint(name string, set *flag.FlagSet) uint { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return uint(parsed) + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_uint64.go b/vendor/github.com/urfave/cli/v2/flag_uint64.go new file mode 100644 index 000000000..8fc3289d8 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_uint64.go @@ -0,0 +1,105 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// Uint64Flag is a flag with type uint64 +type Uint64Flag struct { + Name string + Aliases []string + Usage string + EnvVars []string + FilePath string + Required bool + Hidden bool + Value uint64 + DefaultText string + Destination *uint64 + HasBeenSet bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Uint64Flag) IsSet() bool { + return f.HasBeenSet +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *Uint64Flag) String() string { + return FlagStringer(f) +} + +// Names returns the names of the flag +func (f *Uint64Flag) Names() []string { + return flagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Uint64Flag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *Uint64Flag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Uint64Flag) GetUsage() string { + return f.Usage +} + +// Apply populates the flag given the flag set and environment +func (f *Uint64Flag) Apply(set *flag.FlagSet) error { + if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok { + if val != "" { + valInt, err := strconv.ParseUint(val, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %q as uint64 value for flag %s: %s", val, f.Name, err) + } + + f.Value = valInt + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.Uint64Var(f.Destination, name, f.Value, f.Usage) + continue + } + set.Uint64(name, f.Value, f.Usage) + } + + return nil +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *Uint64Flag) GetValue() string {
+ return fmt.Sprintf("%d", f.Value)
+}
+
+// Uint64 looks up the value of a local Uint64Flag, returns
+// 0 if not found
+func (c *Context) Uint64(name string) uint64 {
+ if fs := lookupFlagSet(name, c); fs != nil {
+ return lookupUint64(name, fs)
+ }
+ return 0
+}
+
+func lookupUint64(name string, set *flag.FlagSet) uint64 {
+ f := set.Lookup(name)
+ if f != nil {
+ parsed, err := strconv.ParseUint(f.Value.String(), 0, 64)
+ if err != nil {
+ return 0
+ }
+ return parsed
+ }
+ return 0
+}
diff --git a/vendor/github.com/urfave/cli/v2/funcs.go b/vendor/github.com/urfave/cli/v2/funcs.go
new file mode 100644
index 000000000..474c48faf
--- /dev/null
+++ b/vendor/github.com/urfave/cli/v2/funcs.go
@@ -0,0 +1,44 @@
+package cli
+
+// BashCompleteFunc is an action to execute when the shell completion flag is set
+type BashCompleteFunc func(*Context)
+
+// BeforeFunc is an action to execute before any subcommands are run, but after
+// the context is ready. If a non-nil error is returned, no subcommands are run.
+type BeforeFunc func(*Context) error
+
+// AfterFunc is an action to execute after any subcommands are run, but after the
+// subcommand has finished. It is run even if Action() panics.
+type AfterFunc func(*Context) error
+
+// ActionFunc is the action to execute when no subcommands are specified
+type ActionFunc func(*Context) error
+
+// CommandNotFoundFunc is executed if the proper command cannot be found
+type CommandNotFoundFunc func(*Context, string)
+
+// OnUsageErrorFunc is executed if a usage error occurs. This is useful for displaying
+// customized usage error messages. This function is able to replace the
+// original error messages. If this function is not set, the "Incorrect usage"
+// message is displayed and the execution is interrupted.
+type OnUsageErrorFunc func(context *Context, err error, isSubcommand bool) error
+
+// ExitErrHandlerFunc is executed if provided in order to handle exitError values
+// returned by Actions and Before/After functions.
+type ExitErrHandlerFunc func(context *Context, err error)
+
+// FlagStringFunc is used by the help generation to display a flag, which is
+// expected to be a single line.
+type FlagStringFunc func(Flag) string
+
+// FlagNamePrefixFunc is used by the default FlagStringFunc to create prefix
+// text for a flag's full name.
+type FlagNamePrefixFunc func(fullName []string, placeholder string) string
+
+// FlagEnvHintFunc is used by the default FlagStringFunc to annotate flag help
+// with the environment variable details.
+type FlagEnvHintFunc func(envVars []string, str string) string
+
+// FlagFileHintFunc is used by the default FlagStringFunc to annotate flag help
+// with the file path details.
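+// For example, the default implementation turns ("/etc/app.conf", "--config FILE\tload config")
+// into "--config FILE\tload config [/etc/app.conf]".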
+type FlagFileHintFunc func(filePath, str string) string diff --git a/vendor/github.com/urfave/cli/v2/go.mod b/vendor/github.com/urfave/cli/v2/go.mod new file mode 100644 index 000000000..113966432 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/go.mod @@ -0,0 +1,9 @@ +module github.com/urfave/cli/v2 + +go 1.11 + +require ( + github.com/BurntSushi/toml v0.3.1 + github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d + gopkg.in/yaml.v2 v2.2.3 +) diff --git a/vendor/github.com/urfave/cli/v2/go.sum b/vendor/github.com/urfave/cli/v2/go.sum new file mode 100644 index 000000000..663ad7276 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/go.sum @@ -0,0 +1,14 @@ +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/urfave/cli/v2/help.go b/vendor/github.com/urfave/cli/v2/help.go new file mode 100644 index 000000000..0a421ee99 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/help.go @@ -0,0 +1,386 @@ +package cli + +import ( + "fmt" + "io" + "os" + "strings" + "text/tabwriter" + "text/template" + "unicode/utf8" +) + +var helpCommand = &Command{ + Name: "help", + Aliases: []string{"h"}, + Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", + Action: func(c *Context) error { + args := c.Args() + if args.Present() { + return ShowCommandHelp(c, args.First()) + } + + _ = ShowAppHelp(c) + return nil + }, +} + +var helpSubcommand = &Command{ + Name: "help", + Aliases: []string{"h"}, + Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", + Action: func(c *Context) error { + args := c.Args() + if args.Present() { + return ShowCommandHelp(c, args.First()) + } + + return ShowSubcommandHelp(c) + }, +} + +// Prints help for the App or Command +type helpPrinter func(w io.Writer, templ string, data interface{}) + +// Prints help for the App or Command with custom template function. +type helpPrinterCustom func(w io.Writer, templ string, data interface{}, customFunc map[string]interface{}) + +// HelpPrinter is a function that writes the help output. If not set explicitly, +// this calls HelpPrinterCustom using only the default template functions. 
+//
+// If custom logic for printing help is required, this function can be
+// overridden. If the ExtraInfo field is defined on an App, this function
+// should not be modified, as HelpPrinterCustom will be used directly in order
+// to capture the extra information.
+var HelpPrinter helpPrinter = printHelp
+
+// HelpPrinterCustom is a function that writes the help output. It is used as
+// the default implementation of HelpPrinter, and may be called directly if
+// the ExtraInfo field is set on an App.
+var HelpPrinterCustom helpPrinterCustom = printHelpCustom
+
+// VersionPrinter prints the version for the App
+var VersionPrinter = printVersion
+
+// ShowAppHelpAndExit - Prints the list of subcommands for the app and exits with exit code.
+func ShowAppHelpAndExit(c *Context, exitCode int) {
+	_ = ShowAppHelp(c)
+	os.Exit(exitCode)
+}
+
+// ShowAppHelp is an action that displays the help.
+func ShowAppHelp(c *Context) error {
+	tpl := c.App.CustomAppHelpTemplate
+	if tpl == "" {
+		tpl = AppHelpTemplate
+	}
+
+	if c.App.ExtraInfo == nil {
+		HelpPrinter(c.App.Writer, tpl, c.App)
+		return nil
+	}
+
+	customAppData := func() map[string]interface{} {
+		return map[string]interface{}{
+			"ExtraInfo": c.App.ExtraInfo,
+		}
+	}
+	HelpPrinterCustom(c.App.Writer, tpl, c.App, customAppData())
+
+	return nil
+}
+
+// DefaultAppComplete prints the list of subcommands as the default app completion method
+func DefaultAppComplete(c *Context) {
+	DefaultCompleteWithFlags(nil)(c)
+}
+
+func printCommandSuggestions(commands []*Command, writer io.Writer) {
+	for _, command := range commands {
+		if command.Hidden {
+			continue
+		}
+		if os.Getenv("_CLI_ZSH_AUTOCOMPLETE_HACK") == "1" {
+			for _, name := range command.Names() {
+				_, _ = fmt.Fprintf(writer, "%s:%s\n", name, command.Usage)
+			}
+		} else {
+			for _, name := range command.Names() {
+				_, _ = fmt.Fprintf(writer, "%s\n", name)
+			}
+		}
+	}
+}
+
+func cliArgContains(flagName string) bool {
+	for _, name := range strings.Split(flagName, ",") {
+		name = strings.TrimSpace(name)
+		count := utf8.RuneCountInString(name)
+		if count > 2 {
+			count = 2
+		}
+		flag := fmt.Sprintf("%s%s", strings.Repeat("-", count), name)
+		for _, a := range os.Args {
+			if a == flag {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func printFlagSuggestions(lastArg string, flags []Flag, writer io.Writer) {
+	cur := strings.TrimPrefix(lastArg, "-")
+	cur = strings.TrimPrefix(cur, "-")
+	for _, flag := range flags {
+		if bflag, ok := flag.(*BoolFlag); ok && bflag.Hidden {
+			continue
+		}
+		for _, name := range flag.Names() {
+			name = strings.TrimSpace(name)
+			// count the number of utf8 letters in the flag name
+			count := utf8.RuneCountInString(name)
+			if count > 2 {
+				count = 2 // reuse this count to generate a single - or -- in flag completion
+			}
+			// if the flag name has more than one utf8 letter and the last argument has
+			// a -- prefix, then skip flag completion for short flags such as -v or -x
+			if strings.HasPrefix(lastArg, "--") && count == 1 {
+				continue
+			}
+			// match if the last argument matches this flag and it is not repeated
+			if strings.HasPrefix(name, cur) && cur != name && !cliArgContains(name) {
+				flagCompletion := fmt.Sprintf("%s%s", strings.Repeat("-", count), name)
+				_, _ = fmt.Fprintln(writer, flagCompletion)
+			}
+		}
+	}
+}
+
+// DefaultCompleteWithFlags returns a completion action that suggests commands
+// and, when the previous argument looks like a flag, flag names.
+func DefaultCompleteWithFlags(cmd *Command) func(c *Context) {
+	return func(c *Context) {
+		if len(os.Args) > 2 {
+			lastArg := os.Args[len(os.Args)-2]
+			if strings.HasPrefix(lastArg, "-") {
+				printFlagSuggestions(lastArg, c.App.Flags, c.App.Writer)
+
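+				// When completing inside a subcommand, also suggest the
+				// command's own flags, not just the app-level ones above.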
if cmd != nil { + printFlagSuggestions(lastArg, cmd.Flags, c.App.Writer) + } + return + } + } + if cmd != nil { + printCommandSuggestions(cmd.Subcommands, c.App.Writer) + } else { + printCommandSuggestions(c.App.Commands, c.App.Writer) + } + } +} + +// ShowCommandHelpAndExit - exits with code after showing help +func ShowCommandHelpAndExit(c *Context, command string, code int) { + _ = ShowCommandHelp(c, command) + os.Exit(code) +} + +// ShowCommandHelp prints help for the given command +func ShowCommandHelp(ctx *Context, command string) error { + // show the subcommand help for a command with subcommands + if command == "" { + HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App) + return nil + } + + for _, c := range ctx.App.Commands { + if c.HasName(command) { + templ := c.CustomHelpTemplate + if templ == "" { + templ = CommandHelpTemplate + } + + HelpPrinter(ctx.App.Writer, templ, c) + + return nil + } + } + + if ctx.App.CommandNotFound == nil { + return Exit(fmt.Sprintf("No help topic for '%v'", command), 3) + } + + ctx.App.CommandNotFound(ctx, command) + return nil +} + +// ShowSubcommandHelpAndExit - Prints help for the given subcommand and exits with exit code. +func ShowSubcommandHelpAndExit(c *Context, exitCode int) { + _ = ShowSubcommandHelp(c) + os.Exit(exitCode) +} + +// ShowSubcommandHelp prints help for the given subcommand +func ShowSubcommandHelp(c *Context) error { + if c == nil { + return nil + } + + if c.Command != nil { + return ShowCommandHelp(c, c.Command.Name) + } + + return ShowCommandHelp(c, "") +} + +// ShowVersion prints the version number of the App +func ShowVersion(c *Context) { + VersionPrinter(c) +} + +func printVersion(c *Context) { + _, _ = fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) +} + +// ShowCompletions prints the lists of commands within a given context +func ShowCompletions(c *Context) { + a := c.App + if a != nil && a.BashComplete != nil { + a.BashComplete(c) + } +} + +// ShowCommandCompletions prints the custom completions for a given command +func ShowCommandCompletions(ctx *Context, command string) { + c := ctx.App.Command(command) + if c != nil { + if c.BashComplete != nil { + c.BashComplete(ctx) + } else { + DefaultCompleteWithFlags(c)(ctx) + } + } + +} + +// printHelpCustom is the default implementation of HelpPrinterCustom. +// +// The customFuncs map will be combined with a default template.FuncMap to +// allow using arbitrary functions in template rendering. +func printHelpCustom(out io.Writer, templ string, data interface{}, customFuncs map[string]interface{}) { + funcMap := template.FuncMap{ + "join": strings.Join, + "indent": indent, + "nindent": nindent, + "trim": strings.TrimSpace, + } + for key, value := range customFuncs { + funcMap[key] = value + } + + w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0) + t := template.Must(template.New("help").Funcs(funcMap).Parse(templ)) + + err := t.Execute(w, data) + if err != nil { + // If the writer is closed, t.Execute will fail, and there's nothing + // we can do to recover. 
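+		// Optionally surface the error for debugging when the
+		// CLI_TEMPLATE_ERROR_DEBUG environment variable is set.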
+		if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" {
+			_, _ = fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err)
+		}
+		return
+	}
+	_ = w.Flush()
+}
+
+func printHelp(out io.Writer, templ string, data interface{}) {
+	HelpPrinterCustom(out, templ, data, nil)
+}
+
+func checkVersion(c *Context) bool {
+	found := false
+	for _, name := range VersionFlag.Names() {
+		if c.Bool(name) {
+			found = true
+		}
+	}
+	return found
+}
+
+func checkHelp(c *Context) bool {
+	found := false
+	for _, name := range HelpFlag.Names() {
+		if c.Bool(name) {
+			found = true
+		}
+	}
+	return found
+}
+
+func checkCommandHelp(c *Context, name string) bool {
+	if c.Bool("h") || c.Bool("help") {
+		_ = ShowCommandHelp(c, name)
+		return true
+	}
+
+	return false
+}
+
+func checkSubcommandHelp(c *Context) bool {
+	if c.Bool("h") || c.Bool("help") {
+		_ = ShowSubcommandHelp(c)
+		return true
+	}
+
+	return false
+}
+
+func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) {
+	if !a.EnableBashCompletion {
+		return false, arguments
+	}
+
+	pos := len(arguments) - 1
+	lastArg := arguments[pos]
+
+	if lastArg != "--generate-bash-completion" {
+		return false, arguments
+	}
+
+	return true, arguments[:pos]
+}
+
+func checkCompletions(c *Context) bool {
+	if !c.shellComplete {
+		return false
+	}
+
+	if args := c.Args(); args.Present() {
+		name := args.First()
+		if cmd := c.App.Command(name); cmd != nil {
+			// let the command handle the completion
+			return false
+		}
+	}
+
+	ShowCompletions(c)
+	return true
+}
+
+func checkCommandCompletions(c *Context, name string) bool {
+	if !c.shellComplete {
+		return false
+	}
+
+	ShowCommandCompletions(c, name)
+	return true
+}
+
+func indent(spaces int, v string) string {
+	pad := strings.Repeat(" ", spaces)
+	return pad + strings.Replace(v, "\n", "\n"+pad, -1)
+}
+
+func nindent(spaces int, v string) string {
+	return "\n" + indent(spaces, v)
+}
diff --git a/vendor/github.com/urfave/cli/v2/parse.go b/vendor/github.com/urfave/cli/v2/parse.go
new file mode 100644
index 000000000..7df17296a
--- /dev/null
+++ b/vendor/github.com/urfave/cli/v2/parse.go
@@ -0,0 +1,94 @@
+package cli
+
+import (
+	"flag"
+	"strings"
+)
+
+type iterativeParser interface {
+	newFlagSet() (*flag.FlagSet, error)
+	useShortOptionHandling() bool
+}
+
+// To enable short-option handling (e.g., "-it" vs "-i -t") we have to
+// iteratively catch parsing errors. This way we achieve LR parsing without
+// transforming any arguments. Otherwise, there is no way we can discriminate
+// combined short options from common arguments that should be left untouched.
+// Pass `shellComplete` to continue parsing options on failure during shell
+// completion, when the user-supplied options may be incomplete.
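+//
+// For example, with UseShortOptionHandling enabled and boolean flags -i and
+// -t defined, a failed parse of "-it" is retried as "-i -t".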
+func parseIter(set *flag.FlagSet, ip iterativeParser, args []string, shellComplete bool) error { + for { + err := set.Parse(args) + if !ip.useShortOptionHandling() || err == nil { + if shellComplete { + return nil + } + return err + } + + errStr := err.Error() + trimmed := strings.TrimPrefix(errStr, "flag provided but not defined: -") + if errStr == trimmed { + return err + } + + // regenerate the initial args with the split short opts + argsWereSplit := false + for i, arg := range args { + // skip args that are not part of the error message + if name := strings.TrimLeft(arg, "-"); name != trimmed { + continue + } + + // if we can't split, the error was accurate + shortOpts := splitShortOptions(set, arg) + if len(shortOpts) == 1 { + return err + } + + // swap current argument with the split version + args = append(args[:i], append(shortOpts, args[i+1:]...)...) + argsWereSplit = true + break + } + + // This should be an impossible to reach code path, but in case the arg + // splitting failed to happen, this will prevent infinite loops + if !argsWereSplit { + return err + } + + // Since custom parsing failed, replace the flag set before retrying + newSet, err := ip.newFlagSet() + if err != nil { + return err + } + *set = *newSet + } +} + +func splitShortOptions(set *flag.FlagSet, arg string) []string { + shortFlagsExist := func(s string) bool { + for _, c := range s[1:] { + if f := set.Lookup(string(c)); f == nil { + return false + } + } + return true + } + + if !isSplittable(arg) || !shortFlagsExist(arg) { + return []string{arg} + } + + separated := make([]string, 0, len(arg)-1) + for _, flagChar := range arg[1:] { + separated = append(separated, "-"+string(flagChar)) + } + + return separated +} + +func isSplittable(flagArg string) bool { + return strings.HasPrefix(flagArg, "-") && !strings.HasPrefix(flagArg, "--") && len(flagArg) > 2 +} diff --git a/vendor/github.com/urfave/cli/v2/sort.go b/vendor/github.com/urfave/cli/v2/sort.go new file mode 100644 index 000000000..23d1c2f77 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/sort.go @@ -0,0 +1,29 @@ +package cli + +import "unicode" + +// lexicographicLess compares strings alphabetically considering case. +func lexicographicLess(i, j string) bool { + iRunes := []rune(i) + jRunes := []rune(j) + + lenShared := len(iRunes) + if lenShared > len(jRunes) { + lenShared = len(jRunes) + } + + for index := 0; index < lenShared; index++ { + ir := iRunes[index] + jr := jRunes[index] + + if lir, ljr := unicode.ToLower(ir), unicode.ToLower(jr); lir != ljr { + return lir < ljr + } + + if ir != jr { + return ir < jr + } + } + + return i < j +} diff --git a/vendor/github.com/urfave/cli/v2/template.go b/vendor/github.com/urfave/cli/v2/template.go new file mode 100644 index 000000000..31c03f81c --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/template.go @@ -0,0 +1,120 @@ +package cli + +// AppHelpTemplate is the text template for the Default help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. 
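+//
+// An importing application can customize it before calling App.Run, e.g.
+// (the SUPPORT section is illustrative):
+//
+//	cli.AppHelpTemplate = cli.AppHelpTemplate + "\nSUPPORT: https://example.com/support\n"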
+var AppHelpTemplate = `NAME: + {{.Name}}{{if .Usage}} - {{.Usage}}{{end}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} + +VERSION: + {{.Version}}{{end}}{{end}}{{if .Description}} + +DESCRIPTION: + {{.Description | nindent 3 | trim}}{{end}}{{if len .Authors}} + +AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}: + {{range $index, $author := .Authors}}{{if $index}} + {{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}} + +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{end}}{{if .VisibleFlags}} + +GLOBAL OPTIONS: + {{range $index, $option := .VisibleFlags}}{{if $index}} + {{end}}{{$option}}{{end}}{{end}}{{if .Copyright}} + +COPYRIGHT: + {{.Copyright}}{{end}} +` + +// CommandHelpTemplate is the text template for the command help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var CommandHelpTemplate = `NAME: + {{.HelpName}} - {{.Usage}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}} + +CATEGORY: + {{.Category}}{{end}}{{if .Description}} + +DESCRIPTION: + {{.Description | nindent 3 | trim}}{{end}}{{if .VisibleFlags}} + +OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +` + +// SubcommandHelpTemplate is the text template for the subcommand help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var SubcommandHelpTemplate = `NAME: + {{.HelpName}} - {{.Usage}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Description}} + +DESCRIPTION: + {{.Description | nindent 3 | trim}}{{end}} + +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{if .VisibleFlags}} + +OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +` + +var MarkdownDocTemplate = `% {{ .App.Name }} 8 + +# NAME + +{{ .App.Name }}{{ if .App.Usage }} - {{ .App.Usage }}{{ end }} + +# SYNOPSIS + +{{ .App.Name }} +{{ if .SynopsisArgs }} +` + "```" + ` +{{ range $v := .SynopsisArgs }}{{ $v }}{{ end }}` + "```" + ` +{{ end }}{{ if .App.UsageText }} +# DESCRIPTION + +{{ .App.UsageText }} +{{ end }} +**Usage**: + +` + "```" + ` +{{ .App.Name }} [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...] 
+` + "```" + ` +{{ if .GlobalArgs }} +# GLOBAL OPTIONS +{{ range $v := .GlobalArgs }} +{{ $v }}{{ end }} +{{ end }}{{ if .Commands }} +# COMMANDS +{{ range $v := .Commands }} +{{ $v }}{{ end }}{{ end }}` + +var FishCompletionTemplate = `# {{ .App.Name }} fish shell completion + +function __fish_{{ .App.Name }}_no_subcommand --description 'Test if there has been any subcommand yet' + for i in (commandline -opc) + if contains -- $i{{ range $v := .AllCommands }} {{ $v }}{{ end }} + return 1 + end + end + return 0 +end + +{{ range $v := .Completions }}{{ $v }} +{{ end }}` diff --git a/vendor/go.opencensus.io/.travis.yml b/vendor/go.opencensus.io/.travis.yml deleted file mode 100644 index bd6b66ee8..000000000 --- a/vendor/go.opencensus.io/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: go - -go_import_path: go.opencensus.io - -go: - - 1.11.x - -env: - global: - GO111MODULE=on - -before_script: - - make install-tools - -script: - - make travis-ci - - go run internal/check/version.go # TODO move this to makefile diff --git a/vendor/go.opencensus.io/go.mod b/vendor/go.opencensus.io/go.mod index 297071562..95b2522a7 100644 --- a/vendor/go.opencensus.io/go.mod +++ b/vendor/go.opencensus.io/go.mod @@ -1,15 +1,12 @@ module go.opencensus.io require ( - github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 - github.com/golang/protobuf v1.3.1 - github.com/google/go-cmp v0.3.0 - github.com/stretchr/testify v1.4.0 - golang.org/x/net v0.0.0-20190620200207-3b0461eec859 - golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect - golang.org/x/text v0.3.3 // indirect - google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect - google.golang.org/grpc v1.20.1 + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e + github.com/golang/protobuf v1.4.3 + github.com/google/go-cmp v0.5.3 + github.com/stretchr/testify v1.6.1 + golang.org/x/net v0.0.0-20201110031124-69a78807bb2b + google.golang.org/grpc v1.33.2 ) go 1.13 diff --git a/vendor/go.opencensus.io/go.sum b/vendor/go.opencensus.io/go.sum index 01c02972c..c97cd1b55 100644 --- a/vendor/go.opencensus.io/go.sum +++ b/vendor/go.opencensus.io/go.sum @@ -1,25 +1,47 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache 
v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -30,24 +52,25 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd h1:r7DufRZuZbWB7j439YfAzP8RPDa9unLkpwQKUYbIMPI= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -55,20 +78,39 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go index 0c54492a2..c8e26ed63 100644 --- a/vendor/go.opencensus.io/trace/basetypes.go +++ b/vendor/go.opencensus.io/trace/basetypes.go @@ -49,6 +49,16 @@ type Attribute struct { value interface{} } +// Key returns the attribute's key +func (a *Attribute) Key() string { + return a.key +} + +// Value returns the attribute's value +func (a *Attribute) Value() interface{} { + return a.value +} + // BoolAttribute returns a bool-valued attribute. func BoolAttribute(key string, value bool) Attribute { return Attribute{key: key, value: value} diff --git a/vendor/go.opencensus.io/trace/spanstore.go b/vendor/go.opencensus.io/trace/spanstore.go index c442d9902..e601f76f2 100644 --- a/vendor/go.opencensus.io/trace/spanstore.go +++ b/vendor/go.opencensus.io/trace/spanstore.go @@ -48,8 +48,10 @@ func (i internalOnly) ReportActiveSpans(name string) []*SpanData { var out []*SpanData s.mu.Lock() defer s.mu.Unlock() - for span := range s.active { - out = append(out, span.makeSpanData()) + for activeSpan := range s.active { + if s, ok := activeSpan.(*span); ok { + out = append(out, s.makeSpanData()) + } } return out } @@ -185,7 +187,7 @@ func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency t // bucketed by latency. type spanStore struct { mu sync.Mutex // protects everything below. - active map[*Span]struct{} + active map[SpanInterface]struct{} errors map[int32]*bucket latency []bucket maxSpansPerErrorBucket int @@ -194,7 +196,7 @@ type spanStore struct { // newSpanStore creates a span store. func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore { s := &spanStore{ - active: make(map[*Span]struct{}), + active: make(map[SpanInterface]struct{}), latency: make([]bucket, len(defaultLatencies)+1), maxSpansPerErrorBucket: errorBucketSize, } @@ -271,7 +273,7 @@ func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) { } // add adds a span to the active bucket of the spanStore. -func (s *spanStore) add(span *Span) { +func (s *spanStore) add(span SpanInterface) { s.mu.Lock() s.active[span] = struct{}{} s.mu.Unlock() @@ -279,7 +281,7 @@ func (s *spanStore) add(span *Span) { // finished removes a span from the active set, and adds a corresponding // SpanData to a latency or error bucket. 
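+// The active set is keyed by SpanInterface (rather than the concrete *span)
+// so that swapped-in Span implementations can participate in the span store.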
-func (s *spanStore) finished(span *Span, sd *SpanData) { +func (s *spanStore) finished(span SpanInterface, sd *SpanData) { latency := sd.EndTime.Sub(sd.StartTime) if latency < 0 { latency = 0 diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go index daf895596..861df9d39 100644 --- a/vendor/go.opencensus.io/trace/trace.go +++ b/vendor/go.opencensus.io/trace/trace.go @@ -28,12 +28,16 @@ import ( "go.opencensus.io/trace/tracestate" ) +type tracer struct{} + +var _ Tracer = &tracer{} + // Span represents a span of a trace. It has an associated SpanContext, and // stores data accumulated while the span is active. // // Ideally users should interact with Spans by calling the functions in this // package that take a Context parameter. -type Span struct { +type span struct { // data contains information recorded about the span. // // It will be non-nil if we are exporting the span or recording events for it. @@ -66,7 +70,7 @@ type Span struct { // IsRecordingEvents returns true if events are being recorded for this span. // Use this check to avoid computing expensive annotations when they will never // be used. -func (s *Span) IsRecordingEvents() bool { +func (s *span) IsRecordingEvents() bool { if s == nil { return false } @@ -109,13 +113,13 @@ type SpanContext struct { type contextKey struct{} // FromContext returns the Span stored in a context, or nil if there isn't one. -func FromContext(ctx context.Context) *Span { +func (t *tracer) FromContext(ctx context.Context) *Span { s, _ := ctx.Value(contextKey{}).(*Span) return s } // NewContext returns a new context with the given Span attached. -func NewContext(parent context.Context, s *Span) context.Context { +func (t *tracer) NewContext(parent context.Context, s *Span) context.Context { return context.WithValue(parent, contextKey{}, s) } @@ -166,12 +170,14 @@ func WithSampler(sampler Sampler) StartOption { // // Returned context contains the newly created span. You can use it to // propagate the returned span in process. -func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { +func (t *tracer) StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { var opts StartOptions var parent SpanContext - if p := FromContext(ctx); p != nil { - p.addChild() - parent = p.spanContext + if p := t.FromContext(ctx); p != nil { + if ps, ok := p.internal.(*span); ok { + ps.addChild() + } + parent = p.SpanContext() } for _, op := range o { op(&opts) @@ -180,7 +186,8 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont ctx, end := startExecutionTracerTask(ctx, name) span.executionTracerTaskEnd = end - return NewContext(ctx, span), span + extSpan := NewSpan(span) + return t.NewContext(ctx, extSpan), extSpan } // StartSpanWithRemoteParent starts a new child span of the span from the given parent. @@ -190,7 +197,7 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont // // Returned context contains the newly created span. You can use it to // propagate the returned span in process. 
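+// A sketch of typical use in a server handler, where sc is a SpanContext
+// decoded from the incoming request by a propagation format:
+//
+//	ctx, span := trace.StartSpanWithRemoteParent(r.Context(), "handler", sc)
+//	defer span.End()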
-func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { +func (t *tracer) StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { var opts StartOptions for _, op := range o { op(&opts) @@ -198,12 +205,13 @@ func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanCont span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) ctx, end := startExecutionTracerTask(ctx, name) span.executionTracerTaskEnd = end - return NewContext(ctx, span), span + extSpan := NewSpan(span) + return t.NewContext(ctx, extSpan), extSpan } -func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span { - span := &Span{} - span.spanContext = parent +func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *span { + s := &span{} + s.spanContext = parent cfg := config.Load().(*Config) if gen, ok := cfg.IDGenerator.(*defaultIDGenerator); ok { @@ -212,9 +220,9 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa } if !hasParent { - span.spanContext.TraceID = cfg.IDGenerator.NewTraceID() + s.spanContext.TraceID = cfg.IDGenerator.NewTraceID() } - span.spanContext.SpanID = cfg.IDGenerator.NewSpanID() + s.spanContext.SpanID = cfg.IDGenerator.NewSpanID() sampler := cfg.DefaultSampler if !hasParent || remoteParent || o.Sampler != nil { @@ -226,47 +234,47 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa if o.Sampler != nil { sampler = o.Sampler } - span.spanContext.setIsSampled(sampler(SamplingParameters{ + s.spanContext.setIsSampled(sampler(SamplingParameters{ ParentContext: parent, - TraceID: span.spanContext.TraceID, - SpanID: span.spanContext.SpanID, + TraceID: s.spanContext.TraceID, + SpanID: s.spanContext.SpanID, Name: name, HasRemoteParent: remoteParent}).Sample) } - if !internal.LocalSpanStoreEnabled && !span.spanContext.IsSampled() { - return span + if !internal.LocalSpanStoreEnabled && !s.spanContext.IsSampled() { + return s } - span.data = &SpanData{ - SpanContext: span.spanContext, + s.data = &SpanData{ + SpanContext: s.spanContext, StartTime: time.Now(), SpanKind: o.SpanKind, Name: name, HasRemoteParent: remoteParent, } - span.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) - span.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) - span.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) - span.links = newEvictedQueue(cfg.MaxLinksPerSpan) + s.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) + s.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) + s.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) + s.links = newEvictedQueue(cfg.MaxLinksPerSpan) if hasParent { - span.data.ParentSpanID = parent.SpanID + s.data.ParentSpanID = parent.SpanID } if internal.LocalSpanStoreEnabled { var ss *spanStore ss = spanStoreForNameCreateIfNew(name) if ss != nil { - span.spanStore = ss - ss.add(span) + s.spanStore = ss + ss.add(s) } } - return span + return s } // End ends the span. -func (s *Span) End() { +func (s *span) End() { if s == nil { return } @@ -296,7 +304,7 @@ func (s *Span) End() { // makeSpanData produces a SpanData representing the current state of the Span. // It requires that s.data is non-nil. 
-func (s *Span) makeSpanData() *SpanData { +func (s *span) makeSpanData() *SpanData { var sd SpanData s.mu.Lock() sd = *s.data @@ -321,7 +329,7 @@ func (s *Span) makeSpanData() *SpanData { } // SpanContext returns the SpanContext of the span. -func (s *Span) SpanContext() SpanContext { +func (s *span) SpanContext() SpanContext { if s == nil { return SpanContext{} } @@ -329,7 +337,7 @@ func (s *Span) SpanContext() SpanContext { } // SetName sets the name of the span, if it is recording events. -func (s *Span) SetName(name string) { +func (s *span) SetName(name string) { if !s.IsRecordingEvents() { return } @@ -339,7 +347,7 @@ func (s *Span) SetName(name string) { } // SetStatus sets the status of the span, if it is recording events. -func (s *Span) SetStatus(status Status) { +func (s *span) SetStatus(status Status) { if !s.IsRecordingEvents() { return } @@ -348,7 +356,7 @@ func (s *Span) SetStatus(status Status) { s.mu.Unlock() } -func (s *Span) interfaceArrayToLinksArray() []Link { +func (s *span) interfaceArrayToLinksArray() []Link { linksArr := make([]Link, 0, len(s.links.queue)) for _, value := range s.links.queue { linksArr = append(linksArr, value.(Link)) @@ -356,7 +364,7 @@ func (s *Span) interfaceArrayToLinksArray() []Link { return linksArr } -func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { +func (s *span) interfaceArrayToMessageEventArray() []MessageEvent { messageEventArr := make([]MessageEvent, 0, len(s.messageEvents.queue)) for _, value := range s.messageEvents.queue { messageEventArr = append(messageEventArr, value.(MessageEvent)) @@ -364,7 +372,7 @@ func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { return messageEventArr } -func (s *Span) interfaceArrayToAnnotationArray() []Annotation { +func (s *span) interfaceArrayToAnnotationArray() []Annotation { annotationArr := make([]Annotation, 0, len(s.annotations.queue)) for _, value := range s.annotations.queue { annotationArr = append(annotationArr, value.(Annotation)) @@ -372,7 +380,7 @@ func (s *Span) interfaceArrayToAnnotationArray() []Annotation { return annotationArr } -func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { +func (s *span) lruAttributesToAttributeMap() map[string]interface{} { attributes := make(map[string]interface{}, s.lruAttributes.len()) for _, key := range s.lruAttributes.keys() { value, ok := s.lruAttributes.get(key) @@ -384,13 +392,13 @@ func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { return attributes } -func (s *Span) copyToCappedAttributes(attributes []Attribute) { +func (s *span) copyToCappedAttributes(attributes []Attribute) { for _, a := range attributes { s.lruAttributes.add(a.key, a.value) } } -func (s *Span) addChild() { +func (s *span) addChild() { if !s.IsRecordingEvents() { return } @@ -402,7 +410,7 @@ func (s *Span) addChild() { // AddAttributes sets attributes in the span. // // Existing attributes whose keys appear in the attributes parameter are overwritten. -func (s *Span) AddAttributes(attributes ...Attribute) { +func (s *span) AddAttributes(attributes ...Attribute) { if !s.IsRecordingEvents() { return } @@ -411,49 +419,27 @@ func (s *Span) AddAttributes(attributes ...Attribute) { s.mu.Unlock() } -// copyAttributes copies a slice of Attributes into a map. 
-func copyAttributes(m map[string]interface{}, attributes []Attribute) { - for _, a := range attributes { - m[a.key] = a.value - } -} - -func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...interface{}) { +func (s *span) printStringInternal(attributes []Attribute, str string) { now := time.Now() - msg := fmt.Sprintf(format, a...) - var m map[string]interface{} - s.mu.Lock() + var am map[string]interface{} if len(attributes) != 0 { - m = make(map[string]interface{}, len(attributes)) - copyAttributes(m, attributes) + am = make(map[string]interface{}, len(attributes)) + for _, attr := range attributes { + am[attr.key] = attr.value + } } - s.annotations.add(Annotation{ - Time: now, - Message: msg, - Attributes: m, - }) - s.mu.Unlock() -} - -func (s *Span) printStringInternal(attributes []Attribute, str string) { - now := time.Now() - var a map[string]interface{} s.mu.Lock() - if len(attributes) != 0 { - a = make(map[string]interface{}, len(attributes)) - copyAttributes(a, attributes) - } s.annotations.add(Annotation{ Time: now, Message: str, - Attributes: a, + Attributes: am, }) s.mu.Unlock() } // Annotate adds an annotation with attributes. // Attributes can be nil. -func (s *Span) Annotate(attributes []Attribute, str string) { +func (s *span) Annotate(attributes []Attribute, str string) { if !s.IsRecordingEvents() { return } @@ -461,11 +447,11 @@ func (s *Span) Annotate(attributes []Attribute, str string) { } // Annotatef adds an annotation with attributes. -func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { +func (s *span) Annotatef(attributes []Attribute, format string, a ...interface{}) { if !s.IsRecordingEvents() { return } - s.lazyPrintfInternal(attributes, format, a...) + s.printStringInternal(attributes, fmt.Sprintf(format, a...)) } // AddMessageSendEvent adds a message send event to the span. @@ -474,7 +460,7 @@ func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{} // unique in this span and the same between the send event and the receive // event (this allows to identify a message between the sender and receiver). // For example, this could be a sequence id. -func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { +func (s *span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { if !s.IsRecordingEvents() { return } @@ -496,7 +482,7 @@ func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedBy // unique in this span and the same between the send event and the receive // event (this allows to identify a message between the sender and receiver). // For example, this could be a sequence id. -func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { +func (s *span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { if !s.IsRecordingEvents() { return } @@ -513,7 +499,7 @@ func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compresse } // AddLink adds a link to the span. 
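+//
+// For example (sketch), where other is the SpanContext of a related span:
+//
+//	span.AddLink(trace.Link{
+//		TraceID: other.TraceID,
+//		SpanID:  other.SpanID,
+//		Type:    trace.LinkTypeChild,
+//	})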
-func (s *Span) AddLink(l Link) {
+func (s *span) AddLink(l Link) {
 	if !s.IsRecordingEvents() {
 		return
 	}
@@ -522,7 +508,7 @@ func (s *Span) AddLink(l Link) {
 	s.mu.Unlock()
 }
 
-func (s *Span) String() string {
+func (s *span) String() string {
 	if s == nil {
 		return ""
 	}
diff --git a/vendor/go.opencensus.io/trace/trace_api.go b/vendor/go.opencensus.io/trace/trace_api.go
new file mode 100644
index 000000000..9e2c3a999
--- /dev/null
+++ b/vendor/go.opencensus.io/trace/trace_api.go
@@ -0,0 +1,265 @@
+// Copyright 2020, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"context"
+)
+
+// DefaultTracer is the tracer used when package-level exported functions are invoked.
+var DefaultTracer Tracer = &tracer{}
+
+// Tracer can start spans and access context functions.
+type Tracer interface {
+
+	// StartSpan starts a new child span of the current span in the context. If
+	// there is no span in the context, creates a new trace and span.
+	//
+	// Returned context contains the newly created span. You can use it to
+	// propagate the returned span in process.
+	StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span)
+
+	// StartSpanWithRemoteParent starts a new child span of the span from the given parent.
+	//
+	// If the incoming context contains a parent, it is ignored. StartSpanWithRemoteParent is
+	// preferred for cases where the parent is propagated via an incoming request.
+	//
+	// Returned context contains the newly created span. You can use it to
+	// propagate the returned span in process.
+	StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span)
+
+	// FromContext returns the Span stored in a context, or nil if there isn't one.
+	FromContext(ctx context.Context) *Span
+
+	// NewContext returns a new context with the given Span attached.
+	NewContext(parent context.Context, s *Span) context.Context
+}
+
+// StartSpan starts a new child span of the current span in the context. If
+// there is no span in the context, creates a new trace and span.
+//
+// Returned context contains the newly created span. You can use it to
+// propagate the returned span in process.
+func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
+	return DefaultTracer.StartSpan(ctx, name, o...)
+}
+
+// StartSpanWithRemoteParent starts a new child span of the span from the given parent.
+//
+// If the incoming context contains a parent, it is ignored. StartSpanWithRemoteParent is
+// preferred for cases where the parent is propagated via an incoming request.
+//
+// Returned context contains the newly created span. You can use it to
+// propagate the returned span in process.
+func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) {
+	return DefaultTracer.StartSpanWithRemoteParent(ctx, name, parent, o...)
+}
+
+// FromContext returns the Span stored in a context, or a Span that is not
+// recording events if there isn't one.
+func FromContext(ctx context.Context) *Span {
+	return DefaultTracer.FromContext(ctx)
+}
+
+// NewContext returns a new context with the given Span attached.
+func NewContext(parent context.Context, s *Span) context.Context {
+	return DefaultTracer.NewContext(parent, s)
+}
+
+// SpanInterface represents a span of a trace. It has an associated SpanContext, and
+// stores data accumulated while the span is active.
+//
+// Ideally users should interact with Spans by calling the functions in this
+// package that take a Context parameter.
+type SpanInterface interface {
+
+	// IsRecordingEvents returns true if events are being recorded for this span.
+	// Use this check to avoid computing expensive annotations when they will never
+	// be used.
+	IsRecordingEvents() bool
+
+	// End ends the span.
+	End()
+
+	// SpanContext returns the SpanContext of the span.
+	SpanContext() SpanContext
+
+	// SetName sets the name of the span, if it is recording events.
+	SetName(name string)
+
+	// SetStatus sets the status of the span, if it is recording events.
+	SetStatus(status Status)
+
+	// AddAttributes sets attributes in the span.
+	//
+	// Existing attributes whose keys appear in the attributes parameter are overwritten.
+	AddAttributes(attributes ...Attribute)
+
+	// Annotate adds an annotation with attributes.
+	// Attributes can be nil.
+	Annotate(attributes []Attribute, str string)
+
+	// Annotatef adds an annotation with attributes.
+	Annotatef(attributes []Attribute, format string, a ...interface{})
+
+	// AddMessageSendEvent adds a message send event to the span.
+	//
+	// messageID is an identifier for the message, which is recommended to be
+	// unique in this span and the same between the send event and the receive
+	// event (this allows to identify a message between the sender and receiver).
+	// For example, this could be a sequence id.
+	AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64)
+
+	// AddMessageReceiveEvent adds a message receive event to the span.
+	//
+	// messageID is an identifier for the message, which is recommended to be
+	// unique in this span and the same between the send event and the receive
+	// event (this allows to identify a message between the sender and receiver).
+	// For example, this could be a sequence id.
+	AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64)
+
+	// AddLink adds a link to the span.
+	AddLink(l Link)
+
+	// String prints a string representation of a span.
+	String() string
+}
+
+// NewSpan is a convenience function for creating a *Span out of a SpanInterface
+func NewSpan(s SpanInterface) *Span {
+	return &Span{internal: s}
+}
+
+// Span is a struct wrapper around the SpanInterface interface, which allows correctly handling
+// nil spans, while also allowing the SpanInterface implementation to be swapped out.
+type Span struct {
+	internal SpanInterface
+}
+
+// Internal returns the underlying implementation of the Span
+func (s *Span) Internal() SpanInterface {
+	return s.internal
+}
+
+// IsRecordingEvents returns true if events are being recorded for this span.
+// Use this check to avoid computing expensive annotations when they will never
+// be used.
+func (s *Span) IsRecordingEvents() bool {
+	if s == nil {
+		return false
+	}
+	return s.internal.IsRecordingEvents()
+}
+
+// End ends the span.
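+//
+// Typical use, with the span ended when the traced work completes (the span
+// name "work" is illustrative):
+//
+//	ctx, span := trace.StartSpan(ctx, "work")
+//	defer span.End()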
+func (s *Span) End() { + if s == nil { + return + } + s.internal.End() +} + +// SpanContext returns the SpanContext of the span. +func (s *Span) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + return s.internal.SpanContext() +} + +// SetName sets the name of the span, if it is recording events. +func (s *Span) SetName(name string) { + if !s.IsRecordingEvents() { + return + } + s.internal.SetName(name) +} + +// SetStatus sets the status of the span, if it is recording events. +func (s *Span) SetStatus(status Status) { + if !s.IsRecordingEvents() { + return + } + s.internal.SetStatus(status) +} + +// AddAttributes sets attributes in the span. +// +// Existing attributes whose keys appear in the attributes parameter are overwritten. +func (s *Span) AddAttributes(attributes ...Attribute) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddAttributes(attributes...) +} + +// Annotate adds an annotation with attributes. +// Attributes can be nil. +func (s *Span) Annotate(attributes []Attribute, str string) { + if !s.IsRecordingEvents() { + return + } + s.internal.Annotate(attributes, str) +} + +// Annotatef adds an annotation with attributes. +func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { + if !s.IsRecordingEvents() { + return + } + s.internal.Annotatef(attributes, format, a...) +} + +// AddMessageSendEvent adds a message send event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize) +} + +// AddMessageReceiveEvent adds a message receive event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize) +} + +// AddLink adds a link to the span. +func (s *Span) AddLink(l Link) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddLink(l) +} + +// String prints a string representation of a span. 
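+//
+// Like the other wrapper methods, it is safe to call on a nil *Span:
+//
+//	var s *Span
+//	_ = s.String() // returns "", does not panic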
+func (s *Span) String() string {
+	if s == nil {
+		return ""
+	}
+	return s.internal.String()
+}
diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go
index e24749199..2c8f1bd5a 100644
--- a/vendor/golang.org/x/oauth2/google/google.go
+++ b/vendor/golang.org/x/oauth2/google/google.go
@@ -115,14 +115,13 @@
 	RefreshToken string `json:"refresh_token"`
 
 	// External Account fields
-	Audience string `json:"audience"`
-	SubjectTokenType string `json:"subject_token_type"`
-	TokenURLExternal string `json:"token_url"`
-	TokenInfoURL string `json:"token_info_url"`
-	ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"`
-	CredentialSource externalaccount.CredentialSource `json:"credential_source"`
-	QuotaProjectID string `json:"quota_project_id"`
-
+	Audience                       string                           `json:"audience"`
+	SubjectTokenType               string                           `json:"subject_token_type"`
+	TokenURLExternal               string                           `json:"token_url"`
+	TokenInfoURL                   string                           `json:"token_info_url"`
+	ServiceAccountImpersonationURL string                           `json:"service_account_impersonation_url"`
+	CredentialSource               externalaccount.CredentialSource `json:"credential_source"`
+	QuotaProjectID                 string                           `json:"quota_project_id"`
 }
 
 func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config {
@@ -155,16 +154,16 @@ func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oau
 		return cfg.TokenSource(ctx, tok), nil
 	case externalAccountKey:
 		cfg := &externalaccount.Config{
-			Audience: f.Audience,
-			SubjectTokenType: f.SubjectTokenType,
-			TokenURL: f.TokenURLExternal,
-			TokenInfoURL: f.TokenInfoURL,
+			Audience:                       f.Audience,
+			SubjectTokenType:               f.SubjectTokenType,
+			TokenURL:                       f.TokenURLExternal,
+			TokenInfoURL:                   f.TokenInfoURL,
 			ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL,
-			ClientSecret: f.ClientSecret,
-			ClientID: f.ClientID,
-			CredentialSource: f.CredentialSource,
-			QuotaProjectID: f.QuotaProjectID,
-			Scopes: scopes,
+			ClientSecret:                   f.ClientSecret,
+			ClientID:                       f.ClientID,
+			CredentialSource:               f.CredentialSource,
+			QuotaProjectID:                 f.QuotaProjectID,
+			Scopes:                         scopes,
 		}
 		return cfg.TokenSource(ctx), nil
 	case "":
diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go
new file mode 100644
index 000000000..3725a0fcb
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go
@@ -0,0 +1,464 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package externalaccount
+
+import (
+	"context"
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"golang.org/x/oauth2"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"path"
+	"sort"
+	"strings"
+	"time"
+)
+
+type awsSecurityCredentials struct {
+	AccessKeyID     string `json:"AccessKeyID"`
+	SecretAccessKey string `json:"SecretAccessKey"`
+	SecurityToken   string `json:"Token"`
+}
+
+// awsRequestSigner is a utility to sign http requests using an AWS V4 signature.
+type awsRequestSigner struct {
+	RegionName             string
+	AwsSecurityCredentials awsSecurityCredentials
+}
+
+// getenv aliases os.Getenv for testing
+var getenv = os.Getenv
+
+const (
+	// AWS Signature Version 4 signing algorithm identifier.
+ awsAlgorithm = "AWS4-HMAC-SHA256" + + // The termination string for the AWS credential scope value as defined in + // https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html + awsRequestType = "aws4_request" + + // The AWS authorization header name for the security session token if available. + awsSecurityTokenHeader = "x-amz-security-token" + + // The AWS authorization header name for the auto-generated date. + awsDateHeader = "x-amz-date" + + awsTimeFormatLong = "20060102T150405Z" + awsTimeFormatShort = "20060102" +) + +func getSha256(input []byte) (string, error) { + hash := sha256.New() + if _, err := hash.Write(input); err != nil { + return "", err + } + return hex.EncodeToString(hash.Sum(nil)), nil +} + +func getHmacSha256(key, input []byte) ([]byte, error) { + hash := hmac.New(sha256.New, key) + if _, err := hash.Write(input); err != nil { + return nil, err + } + return hash.Sum(nil), nil +} + +func cloneRequest(r *http.Request) *http.Request { + r2 := new(http.Request) + *r2 = *r + if r.Header != nil { + r2.Header = make(http.Header, len(r.Header)) + + // Find total number of values. + headerCount := 0 + for _, headerValues := range r.Header { + headerCount += len(headerValues) + } + copiedHeaders := make([]string, headerCount) // shared backing array for headers' values + + for headerKey, headerValues := range r.Header { + headerCount = copy(copiedHeaders, headerValues) + r2.Header[headerKey] = copiedHeaders[:headerCount:headerCount] + copiedHeaders = copiedHeaders[headerCount:] + } + } + return r2 +} + +func canonicalPath(req *http.Request) string { + result := req.URL.EscapedPath() + if result == "" { + return "/" + } + return path.Clean(result) +} + +func canonicalQuery(req *http.Request) string { + queryValues := req.URL.Query() + for queryKey := range queryValues { + sort.Strings(queryValues[queryKey]) + } + return queryValues.Encode() +} + +func canonicalHeaders(req *http.Request) (string, string) { + // Header keys need to be sorted alphabetically. + var headers []string + lowerCaseHeaders := make(http.Header) + for k, v := range req.Header { + k := strings.ToLower(k) + if _, ok := lowerCaseHeaders[k]; ok { + // include additional values + lowerCaseHeaders[k] = append(lowerCaseHeaders[k], v...) 
+ } else { + headers = append(headers, k) + lowerCaseHeaders[k] = v + } + } + sort.Strings(headers) + + var fullHeaders strings.Builder + for _, header := range headers { + headerValue := strings.Join(lowerCaseHeaders[header], ",") + fullHeaders.WriteString(header) + fullHeaders.WriteRune(':') + fullHeaders.WriteString(headerValue) + fullHeaders.WriteRune('\n') + } + + return strings.Join(headers, ";"), fullHeaders.String() +} + +func requestDataHash(req *http.Request) (string, error) { + var requestData []byte + if req.Body != nil { + requestBody, err := req.GetBody() + if err != nil { + return "", err + } + defer requestBody.Close() + + requestData, err = ioutil.ReadAll(io.LimitReader(requestBody, 1<<20)) + if err != nil { + return "", err + } + } + + return getSha256(requestData) +} + +func requestHost(req *http.Request) string { + if req.Host != "" { + return req.Host + } + return req.URL.Host +} + +func canonicalRequest(req *http.Request, canonicalHeaderColumns, canonicalHeaderData string) (string, error) { + dataHash, err := requestDataHash(req) + if err != nil { + return "", err + } + + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", req.Method, canonicalPath(req), canonicalQuery(req), canonicalHeaderData, canonicalHeaderColumns, dataHash), nil +} + +// SignRequest adds the appropriate headers to an http.Request +// or returns an error if something prevented this. +func (rs *awsRequestSigner) SignRequest(req *http.Request) error { + signedRequest := cloneRequest(req) + timestamp := now() + + signedRequest.Header.Add("host", requestHost(req)) + + if rs.AwsSecurityCredentials.SecurityToken != "" { + signedRequest.Header.Add(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SecurityToken) + } + + if signedRequest.Header.Get("date") == "" { + signedRequest.Header.Add(awsDateHeader, timestamp.Format(awsTimeFormatLong)) + } + + authorizationCode, err := rs.generateAuthentication(signedRequest, timestamp) + if err != nil { + return err + } + signedRequest.Header.Set("Authorization", authorizationCode) + + req.Header = signedRequest.Header + return nil +} + +func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp time.Time) (string, error) { + canonicalHeaderColumns, canonicalHeaderData := canonicalHeaders(req) + + dateStamp := timestamp.Format(awsTimeFormatShort) + serviceName := "" + if splitHost := strings.Split(requestHost(req), "."); len(splitHost) > 0 { + serviceName = splitHost[0] + } + + credentialScope := fmt.Sprintf("%s/%s/%s/%s", dateStamp, rs.RegionName, serviceName, awsRequestType) + + requestString, err := canonicalRequest(req, canonicalHeaderColumns, canonicalHeaderData) + if err != nil { + return "", err + } + requestHash, err := getSha256([]byte(requestString)) + if err != nil { + return "", err + } + + stringToSign := fmt.Sprintf("%s\n%s\n%s\n%s", awsAlgorithm, timestamp.Format(awsTimeFormatLong), credentialScope, requestHash) + + signingKey := []byte("AWS4" + rs.AwsSecurityCredentials.SecretAccessKey) + for _, signingInput := range []string{ + dateStamp, rs.RegionName, serviceName, awsRequestType, stringToSign, + } { + signingKey, err = getHmacSha256(signingKey, []byte(signingInput)) + if err != nil { + return "", err + } + } + + return fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", awsAlgorithm, rs.AwsSecurityCredentials.AccessKeyID, credentialScope, canonicalHeaderColumns, hex.EncodeToString(signingKey)), nil +} + +type awsCredentialSource struct { + EnvironmentID string + RegionURL string + RegionalCredVerificationURL string + 
CredVerificationURL string + TargetResource string + requestSigner *awsRequestSigner + region string + ctx context.Context + client *http.Client +} + +type awsRequestHeader struct { + Key string `json:"key"` + Value string `json:"value"` +} + +type awsRequest struct { + URL string `json:"url"` + Method string `json:"method"` + Headers []awsRequestHeader `json:"headers"` +} + +func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) { + if cs.client == nil { + cs.client = oauth2.NewClient(cs.ctx, nil) + } + return cs.client.Do(req.WithContext(cs.ctx)) +} + +func (cs awsCredentialSource) subjectToken() (string, error) { + if cs.requestSigner == nil { + awsSecurityCredentials, err := cs.getSecurityCredentials() + if err != nil { + return "", err + } + + if cs.region, err = cs.getRegion(); err != nil { + return "", err + } + + cs.requestSigner = &awsRequestSigner{ + RegionName: cs.region, + AwsSecurityCredentials: awsSecurityCredentials, + } + } + + // Generate the signed request to AWS STS GetCallerIdentity API. + // Use the required regional endpoint. Otherwise, the request will fail. + req, err := http.NewRequest("POST", strings.Replace(cs.RegionalCredVerificationURL, "{region}", cs.region, 1), nil) + if err != nil { + return "", err + } + // The full, canonical resource name of the workload identity pool + // provider, with or without the HTTPS prefix. + // Including this header as part of the signature is recommended to + // ensure data integrity. + if cs.TargetResource != "" { + req.Header.Add("x-goog-cloud-target-resource", cs.TargetResource) + } + cs.requestSigner.SignRequest(req) + + /* + The GCP STS endpoint expects the headers to be formatted as: + # [ + # {key: 'x-amz-date', value: '...'}, + # {key: 'Authorization', value: '...'}, + # ... + # ] + # And then serialized as: + # quote(json.dumps({ + # url: '...', + # method: 'POST', + # headers: [{key: 'x-amz-date', value: '...'}, ...] + # })) + */ + + awsSignedReq := awsRequest{ + URL: req.URL.String(), + Method: "POST", + } + for headerKey, headerList := range req.Header { + for _, headerValue := range headerList { + awsSignedReq.Headers = append(awsSignedReq.Headers, awsRequestHeader{ + Key: headerKey, + Value: headerValue, + }) + } + } + sort.Slice(awsSignedReq.Headers, func(i, j int) bool { + headerCompare := strings.Compare(awsSignedReq.Headers[i].Key, awsSignedReq.Headers[j].Key) + if headerCompare == 0 { + return strings.Compare(awsSignedReq.Headers[i].Value, awsSignedReq.Headers[j].Value) < 0 + } + return headerCompare < 0 + }) + + result, err := json.Marshal(awsSignedReq) + if err != nil { + return "", err + } + return string(result), nil +} + +func (cs *awsCredentialSource) getRegion() (string, error) { + if envAwsRegion := getenv("AWS_REGION"); envAwsRegion != "" { + return envAwsRegion, nil + } + + if cs.RegionURL == "" { + return "", errors.New("oauth2/google: unable to determine AWS region") + } + + req, err := http.NewRequest("GET", cs.RegionURL, nil) + if err != nil { + return "", err + } + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google: unable to retrieve AWS region - %s", string(respBody)) + } + + // This endpoint will return the region in format: us-east-2b. + // Only the us-east-2 part should be used. 
+ respBodyEnd := 0 + if len(respBody) > 1 { + respBodyEnd = len(respBody) - 1 + } + return string(respBody[:respBodyEnd]), nil +} + +func (cs *awsCredentialSource) getSecurityCredentials() (result awsSecurityCredentials, err error) { + if accessKeyID := getenv("AWS_ACCESS_KEY_ID"); accessKeyID != "" { + if secretAccessKey := getenv("AWS_SECRET_ACCESS_KEY"); secretAccessKey != "" { + return awsSecurityCredentials{ + AccessKeyID: accessKeyID, + SecretAccessKey: secretAccessKey, + SecurityToken: getenv("AWS_SESSION_TOKEN"), + }, nil + } + } + + roleName, err := cs.getMetadataRoleName() + if err != nil { + return + } + + credentials, err := cs.getMetadataSecurityCredentials(roleName) + if err != nil { + return + } + + if credentials.AccessKeyID == "" { + return result, errors.New("oauth2/google: missing AccessKeyId credential") + } + + if credentials.SecretAccessKey == "" { + return result, errors.New("oauth2/google: missing SecretAccessKey credential") + } + + return credentials, nil +} + +func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string) (awsSecurityCredentials, error) { + var result awsSecurityCredentials + + req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.CredVerificationURL, roleName), nil) + if err != nil { + return result, err + } + req.Header.Add("Content-Type", "application/json") + + resp, err := cs.doRequest(req) + if err != nil { + return result, err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return result, err + } + + if resp.StatusCode != 200 { + return result, fmt.Errorf("oauth2/google: unable to retrieve AWS security credentials - %s", string(respBody)) + } + + err = json.Unmarshal(respBody, &result) + return result, err +} + +func (cs *awsCredentialSource) getMetadataRoleName() (string, error) { + if cs.CredVerificationURL == "" { + return "", errors.New("oauth2/google: unable to determine the AWS metadata server security credentials endpoint") + } + + req, err := http.NewRequest("GET", cs.CredVerificationURL, nil) + if err != nil { + return "", err + } + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google: unable to retrieve AWS role name - %s", string(respBody)) + } + + return string(respBody), nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index 3291d4643..57a587097 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -9,6 +9,7 @@ import ( "fmt" "golang.org/x/oauth2" "net/http" + "strconv" "time" ) @@ -35,7 +36,18 @@ func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { ctx: ctx, conf: c, } - return oauth2.ReuseTokenSource(nil, ts) + if c.ServiceAccountImpersonationURL == "" { + return oauth2.ReuseTokenSource(nil, ts) + } + scopes := c.Scopes + ts.conf.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} + imp := impersonateTokenSource{ + ctx: ctx, + url: c.ServiceAccountImpersonationURL, + scopes: scopes, + ts: oauth2.ReuseTokenSource(nil, ts), + } + return oauth2.ReuseTokenSource(nil, imp) } // Subject token file types. 
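Note on the credential resolution order implemented by `getSecurityCredentials` above: static AWS keys from the environment always win, and the EC2 metadata endpoints (role name, then role credentials) are consulted only as a fallback. A minimal, self-contained sketch of that precedence; `resolveAWSCreds`, `creds`, and the stubbed lookups are illustrative names, not part of the vendored package:

```go
package main

import "fmt"

type creds struct{ accessKeyID, secretAccessKey, sessionToken string }

// resolveAWSCreds mirrors the precedence used above: environment variables
// first, the metadata service only when the static pair is incomplete.
func resolveAWSCreds(getenv func(string) string, fromMetadata func() creds) creds {
	if id := getenv("AWS_ACCESS_KEY_ID"); id != "" {
		if secret := getenv("AWS_SECRET_ACCESS_KEY"); secret != "" {
			// Both static keys are present; the session token is optional.
			return creds{id, secret, getenv("AWS_SESSION_TOKEN")}
		}
	}
	// Otherwise fall back to the metadata server.
	return fromMetadata()
}

func main() {
	env := map[string]string{"AWS_ACCESS_KEY_ID": "AKIA...", "AWS_SECRET_ACCESS_KEY": "secret"}
	got := resolveAWSCreds(
		func(k string) string { return env[k] },
		func() creds { return creds{"from", "metadata", ""} })
	fmt.Printf("%+v\n", got) // uses the env pair, never hits metadata
}
```

Injecting `getenv` (as the vendored code does via its package-level alias) keeps the precedence logic testable without touching the real process environment.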
@@ -66,11 +78,27 @@ type CredentialSource struct { } // parse determines the type of CredentialSource needed -func (c *Config) parse() baseCredentialSource { - if c.CredentialSource.File != "" { - return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format} +func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { + if len(c.CredentialSource.EnvironmentID) > 3 && c.CredentialSource.EnvironmentID[:3] == "aws" { + if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil { + if awsVersion != 1 { + return nil, fmt.Errorf("oauth2/google: aws version '%d' is not supported in the current build", awsVersion) + } + return awsCredentialSource{ + EnvironmentID: c.CredentialSource.EnvironmentID, + RegionURL: c.CredentialSource.RegionURL, + RegionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL, + CredVerificationURL: c.CredentialSource.URL, + TargetResource: c.Audience, + ctx: ctx, + }, nil + } + } else if c.CredentialSource.File != "" { + return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil + } else if c.CredentialSource.URL != "" { + return urlCredentialSource{URL: c.CredentialSource.URL, Format: c.CredentialSource.Format, ctx: ctx}, nil } - return nil + return nil, fmt.Errorf("oauth2/google: unable to parse credential source") } type baseCredentialSource interface { @@ -87,11 +115,12 @@ type tokenSource struct { func (ts tokenSource) Token() (*oauth2.Token, error) { conf := ts.conf - credSource := conf.parse() - if credSource == nil { - return nil, fmt.Errorf("oauth2/google: unable to parse credential source") + credSource, err := conf.parse(ts.ctx) + if err != nil { + return nil, err } subjectToken, err := credSource.subjectToken() + if err != nil { return nil, err } @@ -128,6 +157,5 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { if stsResp.RefreshToken != "" { accessToken.RefreshToken = stsResp.RefreshToken } - return accessToken, nil } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go new file mode 100644 index 000000000..1d29c467f --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go @@ -0,0 +1,83 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package externalaccount
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"golang.org/x/oauth2"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"time"
+)
+
+// generateAccessTokenReq is used for service account impersonation.
+type generateAccessTokenReq struct {
+	Delegates []string `json:"delegates,omitempty"`
+	Lifetime  string   `json:"lifetime,omitempty"`
+	Scope     []string `json:"scope,omitempty"`
+}
+
+type impersonateTokenResponse struct {
+	AccessToken string `json:"accessToken"`
+	ExpireTime  string `json:"expireTime"`
+}
+
+type impersonateTokenSource struct {
+	ctx context.Context
+	ts  oauth2.TokenSource
+
+	url    string
+	scopes []string
+}
+
+// Token performs the exchange to get a temporary service account token.
+func (its impersonateTokenSource) Token() (*oauth2.Token, error) {
+	reqBody := generateAccessTokenReq{
+		Lifetime: "3600s",
+		Scope:    its.scopes,
+	}
+	b, err := json.Marshal(reqBody)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: unable to marshal request: %v", err)
+	}
+	client := oauth2.NewClient(its.ctx, its.ts)
+	req, err := http.NewRequest("POST", its.url, bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: unable to create impersonation request: %v", err)
+	}
+	req = req.WithContext(its.ctx)
+	req.Header.Set("Content-Type", "application/json")
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: unable to generate access token: %v", err)
+	}
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: unable to read body: %v", err)
+	}
+	if c := resp.StatusCode; c < 200 || c > 299 {
+		return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body)
+	}
+
+	var accessTokenResp impersonateTokenResponse
+	if err := json.Unmarshal(body, &accessTokenResp); err != nil {
+		return nil, fmt.Errorf("oauth2/google: unable to parse response: %v", err)
+	}
+	expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: unable to parse expiry: %v", err)
+	}
+	return &oauth2.Token{
+		AccessToken: accessTokenResp.AccessToken,
+		Expiry:      expiry,
+		TokenType:   "Bearer",
+	}, nil
+}
diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go
index d7f54e09f..c7d85a3c2 100644
--- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go
+++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go
@@ -8,12 +8,13 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"golang.org/x/oauth2"
 	"io"
 	"net/http"
 	"net/url"
 	"strconv"
 	"strings"
+
+	"golang.org/x/oauth2"
 )
 
 // ExchangeToken performs an oauth2 token exchange with the provided endpoint.
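For orientation, the impersonation exchange above boils down to POSTing a small JSON body to the `ServiceAccountImpersonationURL` and parsing an `accessToken`/`expireTime` pair back. A hedged, self-contained sketch of just those wire shapes; the struct and variable names here are illustrative stand-ins, not the vendored types:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Illustrative mirrors of the request/response bodies exchanged with the
// IAM Credentials generateAccessToken endpoint.
type accessTokenReq struct {
	Lifetime string   `json:"lifetime,omitempty"`
	Scope    []string `json:"scope,omitempty"`
}

type accessTokenResp struct {
	AccessToken string `json:"accessToken"`
	ExpireTime  string `json:"expireTime"`
}

func main() {
	// The request body the token source marshals and POSTs.
	body, _ := json.Marshal(accessTokenReq{
		Lifetime: "3600s",
		Scope:    []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	fmt.Println(string(body))

	// A response like this is unmarshalled, and its RFC 3339 expiry is
	// parsed into the oauth2.Token that the source returns.
	var resp accessTokenResp
	if err := json.Unmarshal([]byte(`{"accessToken":"ya29.x","expireTime":"2021-02-01T15:04:05Z"}`), &resp); err != nil {
		panic(err)
	}
	expiry, err := time.Parse(time.RFC3339, resp.ExpireTime)
	fmt.Println(expiry, err)
}
```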
@@ -40,11 +41,12 @@ func ExchangeToken(ctx context.Context, endpoint string, request *STSTokenExchan authentication.InjectAuthentication(data, headers) encodedData := data.Encode() - req, err := http.NewRequestWithContext(ctx, "POST", endpoint, strings.NewReader(encodedData)) + req, err := http.NewRequest("POST", endpoint, strings.NewReader(encodedData)) if err != nil { return nil, fmt.Errorf("oauth2/google: failed to properly build http request: %v", err) } + req = req.WithContext(ctx) for key, list := range headers { for _, val := range list { req.Header.Add(key, val) diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go new file mode 100644 index 000000000..b0d5d35e7 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go @@ -0,0 +1,71 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "golang.org/x/oauth2" + "io" + "io/ioutil" + "net/http" +) + +type urlCredentialSource struct { + URL string + Headers map[string]string + Format format + ctx context.Context +} + +func (cs urlCredentialSource) subjectToken() (string, error) { + client := oauth2.NewClient(cs.ctx, nil) + req, err := http.NewRequest("GET", cs.URL, nil) + if err != nil { + return "", fmt.Errorf("oauth2/google: HTTP request for URL-sourced credential failed: %v", err) + } + req = req.WithContext(cs.ctx) + + for key, val := range cs.Headers { + req.Header.Add(key, val) + } + resp, err := client.Do(req) + if err != nil { + return "", fmt.Errorf("oauth2/google: invalid response when retrieving subject token: %v", err) + } + defer resp.Body.Close() + + tokenBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", fmt.Errorf("oauth2/google: invalid body in subject token URL query: %v", err) + } + + switch cs.Format.Type { + case "json": + jsonData := make(map[string]interface{}) + err = json.Unmarshal(tokenBytes, &jsonData) + if err != nil { + return "", fmt.Errorf("oauth2/google: failed to unmarshal subject token file: %v", err) + } + val, ok := jsonData[cs.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("oauth2/google: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("oauth2/google: improperly formatted subject token") + } + return token, nil + case "text": + return string(tokenBytes), nil + case "": + return string(tokenBytes), nil + default: + return "", errors.New("oauth2/google: invalid credential_source file format type") + } + +} diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go new file mode 100644 index 000000000..78192498d --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs.go @@ -0,0 +1,102 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package execabs is a drop-in replacement for os/exec +// that requires PATH lookups to find absolute paths. 
+// That is, execabs.Command("cmd") runs the same PATH lookup +// as exec.Command("cmd"), but if the result is a path +// which is relative, the Run and Start methods will report +// an error instead of running the executable. +// +// See https://blog.golang.org/path-security for more information +// about when it may be necessary or appropriate to use this package. +package execabs + +import ( + "context" + "fmt" + "os/exec" + "path/filepath" + "reflect" + "unsafe" +) + +// ErrNotFound is the error resulting if a path search failed to find an executable file. +// It is an alias for exec.ErrNotFound. +var ErrNotFound = exec.ErrNotFound + +// Cmd represents an external command being prepared or run. +// It is an alias for exec.Cmd. +type Cmd = exec.Cmd + +// Error is returned by LookPath when it fails to classify a file as an executable. +// It is an alias for exec.Error. +type Error = exec.Error + +// An ExitError reports an unsuccessful exit by a command. +// It is an alias for exec.ExitError. +type ExitError = exec.ExitError + +func relError(file, path string) error { + return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path) +} + +// LookPath searches for an executable named file in the directories +// named by the PATH environment variable. If file contains a slash, +// it is tried directly and the PATH is not consulted. The result will be +// an absolute path. +// +// LookPath differs from exec.LookPath in its handling of PATH lookups, +// which are used for file names without slashes. If exec.LookPath's +// PATH lookup would have returned an executable from the current directory, +// LookPath instead returns an error. +func LookPath(file string) (string, error) { + path, err := exec.LookPath(file) + if err != nil { + return "", err + } + if filepath.Base(file) == file && !filepath.IsAbs(path) { + return "", relError(file, path) + } + return path, nil +} + +func fixCmd(name string, cmd *exec.Cmd) { + if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) { + // exec.Command was called with a bare binary name and + // exec.LookPath returned a path which is not absolute. + // Set cmd.lookPathErr and clear cmd.Path so that it + // cannot be run. + lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) + if *lookPathErr == nil { + *lookPathErr = relError(name, cmd.Path) + } + cmd.Path = "" + } +} + +// CommandContext is like Command but includes a context. +// +// The provided context is used to kill the process (by calling os.Process.Kill) +// if the context becomes done before the command completes on its own. +func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { + cmd := exec.CommandContext(ctx, name, arg...) + fixCmd(name, cmd) + return cmd + +} + +// Command returns the Cmd struct to execute the named program with the given arguments. +// See exec.Command for most details. +// +// Command differs from exec.Command in its handling of PATH lookups, +// which are used when the program name contains no slashes. +// If exec.Command would have returned an exec.Cmd configured to run an +// executable from the current directory, Command instead +// returns an exec.Cmd that will return an error from Start or Run. +func Command(name string, arg ...string) *exec.Cmd { + cmd := exec.Command(name, arg...) 
+ fixCmd(name, cmd) + return cmd +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 9556fada8..b3463a8b5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -609,6 +609,7 @@ const ( ETHTOOL_RXNTUPLE_ACTION_DROP = -0x1 ETHTOOL_RX_FLOW_SPEC_RING = 0xffffffff ETHTOOL_RX_FLOW_SPEC_RING_VF = 0xff00000000 + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF = 0x20 ETHTOOL_SCHANNELS = 0x3d ETHTOOL_SCOALESCE = 0xf ETHTOOL_SEEE = 0x45 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index d78ce0775..9f3b1a4e5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -3271,7 +3271,6 @@ const ( ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE = 0x2 ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 0x1 ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE = 0x2 - ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF = 0x20 ETHTOOL_FLASH_ALL_REGIONS = 0x0 ETHTOOL_F_UNSUPPORTED__BIT = 0x0 ETHTOOL_F_WISH__BIT = 0x1 diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index d249919c3..0197df872 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -22,6 +22,7 @@ type HWND uintptr const ( InvalidHandle = ^Handle(0) + InvalidHWND = ^HWND(0) // Flags for DefineDosDevice. DDD_EXACT_MATCH_ON_REMOVE = 0x00000004 @@ -215,7 +216,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW //sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) //sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW -//sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32) = user32.GetWindowThreadProcessId +//sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId //sys GetShellWindow() (shellWindow HWND) = user32.GetShellWindow //sys MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW //sys ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx @@ -264,19 +265,29 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) = kernel32.VirtualProtect //sys TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) = mswsock.TransmitFile //sys ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) = kernel32.ReadDirectoryChangesW +//sys FindFirstChangeNotification(path string, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.FindFirstChangeNotificationW +//sys FindNextChangeNotification(handle Handle) (err error) +//sys FindCloseChangeNotification(handle 
Handle) (err error) //sys CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) = crypt32.CertOpenSystemStoreW -//sys CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) = crypt32.CertOpenStore +//sys CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) = crypt32.CertOpenStore //sys CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) [failretval==nil] = crypt32.CertEnumCertificatesInStore -//sys CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) = crypt32.CertAddCertificateContextToStore +//sys CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) = crypt32.CertAddCertificateContextToStore //sys CertCloseStore(store Handle, flags uint32) (err error) = crypt32.CertCloseStore //sys CertDeleteCertificateFromStore(certContext *CertContext) (err error) = crypt32.CertDeleteCertificateFromStore //sys CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) = crypt32.CertDuplicateCertificateContext -//sys PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) = crypt32.PFXImportCertStore -//sys CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) = crypt32.CertGetCertificateChain -//sys CertFreeCertificateChain(ctx *CertChainContext) = crypt32.CertFreeCertificateChain -//sys CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) [failretval==nil] = crypt32.CertCreateCertificateContext -//sys CertFreeCertificateContext(ctx *CertContext) (err error) = crypt32.CertFreeCertificateContext -//sys CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) = crypt32.CertVerifyCertificateChainPolicy +//sys PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) = crypt32.PFXImportCertStore +//sys CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) = crypt32.CertGetCertificateChain +//sys CertFreeCertificateChain(ctx *CertChainContext) = crypt32.CertFreeCertificateChain +//sys CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) [failretval==nil] = crypt32.CertCreateCertificateContext +//sys CertFreeCertificateContext(ctx *CertContext) (err error) = crypt32.CertFreeCertificateContext +//sys CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) = crypt32.CertVerifyCertificateChainPolicy +//sys CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) = crypt32.CertGetNameStringW +//sys CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) = crypt32.CertFindExtension 
+//sys CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) = crypt32.CryptQueryObject +//sys CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) = crypt32.CryptDecodeObject +//sys CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) = crypt32.CryptProtectData +//sys CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) = crypt32.CryptUnprotectData +//sys WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) = wintrust.WinVerifyTrustEx //sys RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) = advapi32.RegOpenKeyExW //sys RegCloseKey(key Handle) (regerrno error) = advapi32.RegCloseKey //sys RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index b3bd0da42..fd4260762 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -227,7 +227,7 @@ const ( ) const ( - // filters for ReadDirectoryChangesW + // filters for ReadDirectoryChangesW and FindFirstChangeNotificationW FILE_NOTIFY_CHANGE_FILE_NAME = 0x001 FILE_NOTIFY_CHANGE_DIR_NAME = 0x002 FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x004 @@ -427,6 +427,67 @@ const ( CERT_CLOSE_STORE_FORCE_FLAG = 0x00000001 CERT_CLOSE_STORE_CHECK_FLAG = 0x00000002 + /* CryptQueryObject object type */ + CERT_QUERY_OBJECT_FILE = 1 + CERT_QUERY_OBJECT_BLOB = 2 + + /* CryptQueryObject content type flags */ + CERT_QUERY_CONTENT_CERT = 1 + CERT_QUERY_CONTENT_CTL = 2 + CERT_QUERY_CONTENT_CRL = 3 + CERT_QUERY_CONTENT_SERIALIZED_STORE = 4 + CERT_QUERY_CONTENT_SERIALIZED_CERT = 5 + CERT_QUERY_CONTENT_SERIALIZED_CTL = 6 + CERT_QUERY_CONTENT_SERIALIZED_CRL = 7 + CERT_QUERY_CONTENT_PKCS7_SIGNED = 8 + CERT_QUERY_CONTENT_PKCS7_UNSIGNED = 9 + CERT_QUERY_CONTENT_PKCS7_SIGNED_EMBED = 10 + CERT_QUERY_CONTENT_PKCS10 = 11 + CERT_QUERY_CONTENT_PFX = 12 + CERT_QUERY_CONTENT_CERT_PAIR = 13 + CERT_QUERY_CONTENT_PFX_AND_LOAD = 14 + CERT_QUERY_CONTENT_FLAG_CERT = (1 << CERT_QUERY_CONTENT_CERT) + CERT_QUERY_CONTENT_FLAG_CTL = (1 << CERT_QUERY_CONTENT_CTL) + CERT_QUERY_CONTENT_FLAG_CRL = (1 << CERT_QUERY_CONTENT_CRL) + CERT_QUERY_CONTENT_FLAG_SERIALIZED_STORE = (1 << CERT_QUERY_CONTENT_SERIALIZED_STORE) + CERT_QUERY_CONTENT_FLAG_SERIALIZED_CERT = (1 << CERT_QUERY_CONTENT_SERIALIZED_CERT) + CERT_QUERY_CONTENT_FLAG_SERIALIZED_CTL = (1 << CERT_QUERY_CONTENT_SERIALIZED_CTL) + CERT_QUERY_CONTENT_FLAG_SERIALIZED_CRL = (1 << CERT_QUERY_CONTENT_SERIALIZED_CRL) + CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED = (1 << CERT_QUERY_CONTENT_PKCS7_SIGNED) + CERT_QUERY_CONTENT_FLAG_PKCS7_UNSIGNED = (1 << CERT_QUERY_CONTENT_PKCS7_UNSIGNED) + 
CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED_EMBED = (1 << CERT_QUERY_CONTENT_PKCS7_SIGNED_EMBED) + CERT_QUERY_CONTENT_FLAG_PKCS10 = (1 << CERT_QUERY_CONTENT_PKCS10) + CERT_QUERY_CONTENT_FLAG_PFX = (1 << CERT_QUERY_CONTENT_PFX) + CERT_QUERY_CONTENT_FLAG_CERT_PAIR = (1 << CERT_QUERY_CONTENT_CERT_PAIR) + CERT_QUERY_CONTENT_FLAG_PFX_AND_LOAD = (1 << CERT_QUERY_CONTENT_PFX_AND_LOAD) + CERT_QUERY_CONTENT_FLAG_ALL = (CERT_QUERY_CONTENT_FLAG_CERT | CERT_QUERY_CONTENT_FLAG_CTL | CERT_QUERY_CONTENT_FLAG_CRL | CERT_QUERY_CONTENT_FLAG_SERIALIZED_STORE | CERT_QUERY_CONTENT_FLAG_SERIALIZED_CERT | CERT_QUERY_CONTENT_FLAG_SERIALIZED_CTL | CERT_QUERY_CONTENT_FLAG_SERIALIZED_CRL | CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED | CERT_QUERY_CONTENT_FLAG_PKCS7_UNSIGNED | CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED_EMBED | CERT_QUERY_CONTENT_FLAG_PKCS10 | CERT_QUERY_CONTENT_FLAG_PFX | CERT_QUERY_CONTENT_FLAG_CERT_PAIR) + CERT_QUERY_CONTENT_FLAG_ALL_ISSUER_CERT = (CERT_QUERY_CONTENT_FLAG_CERT | CERT_QUERY_CONTENT_FLAG_SERIALIZED_STORE | CERT_QUERY_CONTENT_FLAG_SERIALIZED_CERT | CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED | CERT_QUERY_CONTENT_FLAG_PKCS7_UNSIGNED) + + /* CryptQueryObject format type flags */ + CERT_QUERY_FORMAT_BINARY = 1 + CERT_QUERY_FORMAT_BASE64_ENCODED = 2 + CERT_QUERY_FORMAT_ASN_ASCII_HEX_ENCODED = 3 + CERT_QUERY_FORMAT_FLAG_BINARY = (1 << CERT_QUERY_FORMAT_BINARY) + CERT_QUERY_FORMAT_FLAG_BASE64_ENCODED = (1 << CERT_QUERY_FORMAT_BASE64_ENCODED) + CERT_QUERY_FORMAT_FLAG_ASN_ASCII_HEX_ENCODED = (1 << CERT_QUERY_FORMAT_ASN_ASCII_HEX_ENCODED) + CERT_QUERY_FORMAT_FLAG_ALL = (CERT_QUERY_FORMAT_FLAG_BINARY | CERT_QUERY_FORMAT_FLAG_BASE64_ENCODED | CERT_QUERY_FORMAT_FLAG_ASN_ASCII_HEX_ENCODED) + + /* CertGetNameString name types */ + CERT_NAME_EMAIL_TYPE = 1 + CERT_NAME_RDN_TYPE = 2 + CERT_NAME_ATTR_TYPE = 3 + CERT_NAME_SIMPLE_DISPLAY_TYPE = 4 + CERT_NAME_FRIENDLY_DISPLAY_TYPE = 5 + CERT_NAME_DNS_TYPE = 6 + CERT_NAME_URL_TYPE = 7 + CERT_NAME_UPN_TYPE = 8 + + /* CertGetNameString flags */ + CERT_NAME_ISSUER_FLAG = 0x1 + CERT_NAME_DISABLE_IE4_UTF8_FLAG = 0x10000 + CERT_NAME_SEARCH_ALL_NAMES_FLAG = 0x2 + CERT_NAME_STR_ENABLE_PUNYCODE_FLAG = 0x00200000 + /* AuthType values for SSLExtraCertChainPolicyPara struct */ AUTHTYPE_CLIENT = 1 AUTHTYPE_SERVER = 2 @@ -437,6 +498,22 @@ const ( SECURITY_FLAG_IGNORE_WRONG_USAGE = 0x00000200 SECURITY_FLAG_IGNORE_CERT_CN_INVALID = 0x00001000 SECURITY_FLAG_IGNORE_CERT_DATE_INVALID = 0x00002000 + + /* Flags for Crypt[Un]ProtectData */ + CRYPTPROTECT_UI_FORBIDDEN = 0x1 + CRYPTPROTECT_LOCAL_MACHINE = 0x4 + CRYPTPROTECT_CRED_SYNC = 0x8 + CRYPTPROTECT_AUDIT = 0x10 + CRYPTPROTECT_NO_RECOVERY = 0x20 + CRYPTPROTECT_VERIFY_PROTECTION = 0x40 + CRYPTPROTECT_CRED_REGENERATE = 0x80 + + /* Flags for CryptProtectPromptStruct */ + CRYPTPROTECT_PROMPT_ON_UNPROTECT = 1 + CRYPTPROTECT_PROMPT_ON_PROTECT = 2 + CRYPTPROTECT_PROMPT_RESERVED = 4 + CRYPTPROTECT_PROMPT_STRONG = 8 + CRYPTPROTECT_PROMPT_REQUIRE_STRONG = 16 ) const ( @@ -459,10 +536,58 @@ const ( REALTIME_PRIORITY_CLASS = 0x00000100 ) +/* wintrust.h constants for WinVerifyTrustEx */ +const ( + WTD_UI_ALL = 1 + WTD_UI_NONE = 2 + WTD_UI_NOBAD = 3 + WTD_UI_NOGOOD = 4 + + WTD_REVOKE_NONE = 0 + WTD_REVOKE_WHOLECHAIN = 1 + + WTD_CHOICE_FILE = 1 + WTD_CHOICE_CATALOG = 2 + WTD_CHOICE_BLOB = 3 + WTD_CHOICE_SIGNER = 4 + WTD_CHOICE_CERT = 5 + + WTD_STATEACTION_IGNORE = 0x00000000 + WTD_STATEACTION_VERIFY = 0x00000010 + WTD_STATEACTION_CLOSE = 0x00000002 + WTD_STATEACTION_AUTO_CACHE = 0x00000003 + WTD_STATEACTION_AUTO_CACHE_FLUSH = 0x00000004 + + WTD_USE_IE4_TRUST_FLAG = 
0x1 + WTD_NO_IE4_CHAIN_FLAG = 0x2 + WTD_NO_POLICY_USAGE_FLAG = 0x4 + WTD_REVOCATION_CHECK_NONE = 0x10 + WTD_REVOCATION_CHECK_END_CERT = 0x20 + WTD_REVOCATION_CHECK_CHAIN = 0x40 + WTD_REVOCATION_CHECK_CHAIN_EXCLUDE_ROOT = 0x80 + WTD_SAFER_FLAG = 0x100 + WTD_HASH_ONLY_FLAG = 0x200 + WTD_USE_DEFAULT_OSVER_CHECK = 0x400 + WTD_LIFETIME_SIGNING_FLAG = 0x800 + WTD_CACHE_ONLY_URL_RETRIEVAL = 0x1000 + WTD_DISABLE_MD2_MD4 = 0x2000 + WTD_MOTW = 0x4000 + + WTD_UICONTEXT_EXECUTE = 0 + WTD_UICONTEXT_INSTALL = 1 +) + var ( OID_PKIX_KP_SERVER_AUTH = []byte("1.3.6.1.5.5.7.3.1\x00") OID_SERVER_GATED_CRYPTO = []byte("1.3.6.1.4.1.311.10.3.3\x00") OID_SGC_NETSCAPE = []byte("2.16.840.1.113730.4.1\x00") + + WINTRUST_ACTION_GENERIC_VERIFY_V2 = GUID{ + Data1: 0xaac56b, + Data2: 0xcd44, + Data3: 0x11d0, + Data4: [8]byte{0x8c, 0xc2, 0x0, 0xc0, 0x4f, 0xc2, 0x95, 0xee}, + } ) // Pointer represents a pointer to an arbitrary Windows type. @@ -1051,7 +1176,57 @@ type MibIfRow struct { } type CertInfo struct { - // Not implemented + Version uint32 + SerialNumber CryptIntegerBlob + SignatureAlgorithm CryptAlgorithmIdentifier + Issuer CertNameBlob + NotBefore Filetime + NotAfter Filetime + Subject CertNameBlob + SubjectPublicKeyInfo CertPublicKeyInfo + IssuerUniqueId CryptBitBlob + SubjectUniqueId CryptBitBlob + CountExtensions uint32 + Extensions *CertExtension +} + +type CertExtension struct { + ObjId *byte + Critical int32 + Value CryptObjidBlob +} + +type CryptAlgorithmIdentifier struct { + ObjId *byte + Parameters CryptObjidBlob +} + +type CertPublicKeyInfo struct { + Algorithm CryptAlgorithmIdentifier + PublicKey CryptBitBlob +} + +type DataBlob struct { + Size uint32 + Data *byte +} +type CryptIntegerBlob DataBlob +type CryptUintBlob DataBlob +type CryptObjidBlob DataBlob +type CertNameBlob DataBlob +type CertRdnValueBlob DataBlob +type CertBlob DataBlob +type CrlBlob DataBlob +type CryptDataBlob DataBlob +type CryptHashBlob DataBlob +type CryptDigestBlob DataBlob +type CryptDerBlob DataBlob +type CryptAttrBlob DataBlob + +type CryptBitBlob struct { + Size uint32 + Data *byte + UnusedBits uint32 } type CertContext struct { @@ -1157,9 +1332,64 @@ type CertChainPolicyStatus struct { ExtraPolicyStatus Pointer } -type CryptDataBlob struct { - Size uint32 - Data *byte +type CertPolicyInfo struct { + Identifier *byte + CountQualifiers uint32 + Qualifiers *CertPolicyQualifierInfo +} + +type CertPoliciesInfo struct { + Count uint32 + PolicyInfos *CertPolicyInfo +} + +type CertPolicyQualifierInfo struct { + // Not implemented +} + +type CertStrongSignPara struct { + Size uint32 + InfoChoice uint32 + InfoOrSerializedInfoOrOID unsafe.Pointer +} + +type CryptProtectPromptStruct struct { + Size uint32 + PromptFlags uint32 + App HWND + Prompt *uint16 +} + +type WinTrustData struct { + Size uint32 + PolicyCallbackData uintptr + SIPClientData uintptr + UIChoice uint32 + RevocationChecks uint32 + UnionChoice uint32 + FileOrCatalogOrBlobOrSgnrOrCert unsafe.Pointer + StateAction uint32 + StateData Handle + URLReference *uint16 + ProvFlags uint32 + UIContext uint32 + SignatureSettings *WinTrustSignatureSettings +} + +type WinTrustFileInfo struct { + Size uint32 + FilePath *uint16 + File Handle + KnownSubject *GUID +} + +type WinTrustSignatureSettings struct { + Size uint32 + Index uint32 + Flags uint32 + SecondarySigs uint32 + VerifiedSigIndex uint32 + CryptoPolicy *CertStrongSignPara } const ( diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 
cd5e8528c..c38c59d77 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -51,6 +51,7 @@ var ( modshell32 = NewLazySystemDLL("shell32.dll") moduser32 = NewLazySystemDLL("user32.dll") moduserenv = NewLazySystemDLL("userenv.dll") + modwintrust = NewLazySystemDLL("wintrust.dll") modws2_32 = NewLazySystemDLL("ws2_32.dll") modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") @@ -145,12 +146,18 @@ var ( procCertDeleteCertificateFromStore = modcrypt32.NewProc("CertDeleteCertificateFromStore") procCertDuplicateCertificateContext = modcrypt32.NewProc("CertDuplicateCertificateContext") procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore") + procCertFindExtension = modcrypt32.NewProc("CertFindExtension") procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain") procCertFreeCertificateContext = modcrypt32.NewProc("CertFreeCertificateContext") procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain") + procCertGetNameStringW = modcrypt32.NewProc("CertGetNameStringW") procCertOpenStore = modcrypt32.NewProc("CertOpenStore") procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") procCertVerifyCertificateChainPolicy = modcrypt32.NewProc("CertVerifyCertificateChainPolicy") + procCryptDecodeObject = modcrypt32.NewProc("CryptDecodeObject") + procCryptProtectData = modcrypt32.NewProc("CryptProtectData") + procCryptQueryObject = modcrypt32.NewProc("CryptQueryObject") + procCryptUnprotectData = modcrypt32.NewProc("CryptUnprotectData") procPFXImportCertStore = modcrypt32.NewProc("PFXImportCertStore") procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") @@ -183,9 +190,12 @@ var ( procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") procExitProcess = modkernel32.NewProc("ExitProcess") procFindClose = modkernel32.NewProc("FindClose") + procFindCloseChangeNotification = modkernel32.NewProc("FindCloseChangeNotification") + procFindFirstChangeNotificationW = modkernel32.NewProc("FindFirstChangeNotificationW") procFindFirstFileW = modkernel32.NewProc("FindFirstFileW") procFindFirstVolumeMountPointW = modkernel32.NewProc("FindFirstVolumeMountPointW") procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") + procFindNextChangeNotification = modkernel32.NewProc("FindNextChangeNotification") procFindNextFileW = modkernel32.NewProc("FindNextFileW") procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW") procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW") @@ -347,6 +357,7 @@ var ( procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") + procWinVerifyTrustEx = modwintrust.NewProc("WinVerifyTrustEx") procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") procWSACleanup = modws2_32.NewProc("WSACleanup") @@ -1199,6 +1210,12 @@ func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (contex return } +func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) { + r0, _, _ := syscall.Syscall(procCertFindExtension.Addr(), 3, uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) + ret = (*CertExtension)(unsafe.Pointer(r0)) + return +} + func 
CertFreeCertificateChain(ctx *CertChainContext) { syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) return @@ -1220,6 +1237,12 @@ func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, a return } +func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) { + r0, _, _ := syscall.Syscall6(procCertGetNameStringW.Addr(), 6, uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) + chars = uint32(r0) + return +} + func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) handle = Handle(r0) @@ -1246,6 +1269,38 @@ func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext return } +func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procCryptDecodeObject.Addr(), 7, uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { + r1, _, e1 := syscall.Syscall9(procCryptProtectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) { + r1, _, e1 := syscall.Syscall12(procCryptQueryObject.Addr(), 11, uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { + r1, _, e1 := syscall.Syscall9(procCryptUnprotectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) { r0, _, e1 := 
syscall.Syscall(procPFXImportCertStore.Addr(), 3, uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) store = Handle(r0) @@ -1525,6 +1580,36 @@ func FindClose(handle Handle) (err error) { return } +func FindCloseChangeNotification(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindCloseChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FindFirstChangeNotification(path string, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(path) + if err != nil { + return + } + return _FindFirstChangeNotification(_p0, watchSubtree, notifyFilter) +} + +func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) { + var _p1 uint32 + if watchSubtree { + _p1 = 1 + } + r0, _, e1 := syscall.Syscall(procFindFirstChangeNotificationW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) handle = Handle(r0) @@ -1552,6 +1637,14 @@ func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, er return } +func FindNextChangeNotification(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func findNextFile1(handle Handle, data *win32finddata1) (err error) { r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) if r1 == 0 { @@ -2904,9 +2997,12 @@ func GetShellWindow() (shellWindow HWND) { return } -func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32) { - r0, _, _ := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0) +func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0) tid = uint32(r0) + if tid == 0 { + err = errnoErr(e1) + } return } @@ -2947,6 +3043,14 @@ func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { return } +func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { + r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + func FreeAddrInfoW(addrinfo *AddrinfoW) { syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) return diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports.go b/vendor/golang.org/x/tools/cmd/goimports/goimports.go index 27708972d..b354c9e82 100644 --- a/vendor/golang.org/x/tools/cmd/goimports/goimports.go +++ b/vendor/golang.org/x/tools/cmd/goimports/goimports.go @@ -11,11 +11,11 @@ import ( "flag" "fmt" "go/scanner" + exec "golang.org/x/sys/execabs" "io" "io/ioutil" "log" "os" - "os/exec" "path/filepath" "runtime" "runtime/pprof" diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index f65aad4ec..8659a0c5d 100644 --- 
a/vendor/golang.org/x/tools/internal/gocommand/invoke.go
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -9,9 +9,9 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	exec "golang.org/x/sys/execabs"
 	"io"
 	"os"
-	"os/exec"
 	"regexp"
 	"strconv"
 	"strings"
diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go
index c93daa98c..1f635e430 100644
--- a/vendor/google.golang.org/api/internal/creds.go
+++ b/vendor/google.golang.org/api/internal/creds.go
@@ -64,28 +64,40 @@ const (
 
 // credentialsFromJSON returns a google.Credentials based on the input.
 //
-// - If the JSON is a service account and no scopes provided, returns self-signed JWT auth flow
-// - Otherwise, returns OAuth 2.0 flow.
+// - A self-signed JWT auth flow will be executed if: the data file is a service
+//   account, no user scopes are provided, an audience is provided, a
+//   user-specified endpoint is not provided, and credentials will not be
+//   impersonated.
+//
+// - Otherwise, executes a standard OAuth 2.0 flow.
 func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*google.Credentials, error) {
 	cred, err := google.CredentialsFromJSON(ctx, data, ds.GetScopes()...)
 	if err != nil {
 		return nil, err
 	}
-	if len(data) > 0 && len(ds.Scopes) == 0 && (ds.DefaultAudience != "" || len(ds.Audiences) > 0) {
-		var f struct {
-			Type string `json:"type"`
-			// The rest JSON fields are omitted because they are not used.
-		}
-		if err := json.Unmarshal(cred.JSON, &f); err != nil {
+	// Standard OAuth 2.0 Flow
+	if len(data) == 0 ||
+		len(ds.Scopes) > 0 ||
+		(ds.DefaultAudience == "" && len(ds.Audiences) == 0) ||
+		ds.ImpersonationConfig != nil ||
+		ds.Endpoint != "" {
+		return cred, nil
+	}
+
+	// Check if JSON is a service account and if so create a self-signed JWT.
+	var f struct {
+		Type string `json:"type"`
+		// The rest of the JSON fields are omitted because they are not used.
+	}
+	if err := json.Unmarshal(cred.JSON, &f); err != nil {
+		return nil, err
+	}
+	if f.Type == serviceAccountKey {
+		ts, err := selfSignedJWTTokenSource(data, ds.DefaultAudience, ds.Audiences)
+		if err != nil {
 			return nil, err
 		}
-		if f.Type == serviceAccountKey {
-			ts, err := selfSignedJWTTokenSource(data, ds.DefaultAudience, ds.Audiences)
-			if err != nil {
-				return nil, err
-			}
-			cred.TokenSource = ts
-		}
+		cred.TokenSource = ts
 	}
 	return cred, err
 }
diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go
index 3338c8d19..276d6f696 100644
--- a/vendor/google.golang.org/api/internal/gensupport/send.go
+++ b/vendor/google.golang.org/api/internal/gensupport/send.go
@@ -12,23 +12,6 @@ import (
 	"time"
 )
 
-// Hook is the type of a function that is called once before each HTTP request
-// that is sent by a generated API. It returns a function that is called after
-// the request returns.
-// Hooks are not called if the context is nil.
-type Hook func(ctx context.Context, req *http.Request) func(resp *http.Response)
-
-var hooks []Hook
-
-// RegisterHook registers a Hook to be called before each HTTP request by a
-// generated API. Hooks are called in the order they are registered. Each
-// hook can return a function; if it is non-nil, it is called after the HTTP
-// request returns. These functions are called in the reverse order.
-// RegisterHook should not be called concurrently with itself or SendRequest.
-func RegisterHook(h Hook) {
-	hooks = append(hooks, h)
-}
-
 // SendRequest sends a single HTTP request using the given client.
diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go
index 3338c8d19..276d6f696 100644
--- a/vendor/google.golang.org/api/internal/gensupport/send.go
+++ b/vendor/google.golang.org/api/internal/gensupport/send.go
@@ -12,23 +12,6 @@ import (
 	"time"
 )
 
-// Hook is the type of a function that is called once before each HTTP request
-// that is sent by a generated API. It returns a function that is called after
-// the request returns.
-// Hooks are not called if the context is nil.
-type Hook func(ctx context.Context, req *http.Request) func(resp *http.Response)
-
-var hooks []Hook
-
-// RegisterHook registers a Hook to be called before each HTTP request by a
-// generated API. Hooks are called in the order they are registered. Each
-// hook can return a function; if it is non-nil, it is called after the HTTP
-// request returns. These functions are called in the reverse order.
-// RegisterHook should not be called concurrently with itself or SendRequest.
-func RegisterHook(h Hook) {
-	hooks = append(hooks, h)
-}
-
 // SendRequest sends a single HTTP request using the given client.
 // If ctx is non-nil, it calls all hooks, then sends the request with
 // req.WithContext, then calls any functions returned by the hooks in
@@ -42,23 +25,7 @@ func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*
 	if ctx == nil {
 		return client.Do(req)
 	}
-	// Call hooks in order of registration, store returned funcs.
-	post := make([]func(resp *http.Response), len(hooks))
-	for i, h := range hooks {
-		fn := h(ctx, req)
-		post[i] = fn
-	}
-
-	// Send request.
-	resp, err := send(ctx, client, req)
-
-	// Call returned funcs in reverse order.
-	for i := len(post) - 1; i >= 0; i-- {
-		if fn := post[i]; fn != nil {
-			fn(resp)
-		}
-	}
-	return resp, err
+	return send(ctx, client, req)
 }
 
 func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
@@ -92,23 +59,7 @@ func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Re
 	if ctx == nil {
 		return client.Do(req)
 	}
-	// Call hooks in order of registration, store returned funcs.
-	post := make([]func(resp *http.Response), len(hooks))
-	for i, h := range hooks {
-		fn := h(ctx, req)
-		post[i] = fn
-	}
-
-	// Send request with retry.
-	resp, err := sendAndRetry(ctx, client, req)
-
-	// Call returned funcs in reverse order.
-	for i := len(post) - 1; i >= 0; i-- {
-		if fn := post[i]; fn != nil {
-			fn(resp)
-		}
-	}
-	return resp, err
+	return sendAndRetry(ctx, client, req)
 }
 
 func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
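Illustration (not part of the patch): with the hook machinery removed, SendRequest with a non-nil context reduces to send(), which applies req.WithContext before calling the client; a nil context still falls back to client.Do. A hypothetical caller inside google.golang.org/api (gensupport is an internal package, so this only compiles within that module):

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"google.golang.org/api/internal/gensupport" // vendored path as in this diff
)

func main() {
	req, err := http.NewRequest("GET", "https://www.googleapis.com/discovery/v1/apis", nil)
	if err != nil {
		log.Fatal(err)
	}
	// A non-nil ctx means the request is sent via send(), which honors
	// cancellation and deadlines through req.WithContext.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	resp, err := gensupport.SendRequest(ctx, http.DefaultClient, req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}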
diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go
index 10383bb2f..3f44ddb56 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-gen.go
+++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC.
+// Copyright 2021 Google LLC.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
@@ -2446,7 +2446,7 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header {
 
 func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -2594,7 +2594,7 @@ func (c *BucketAccessControlsGetCall) Header() http.Header {
 
 func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -2761,7 +2761,7 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header {
 
 func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -2934,7 +2934,7 @@ func (c *BucketAccessControlsListCall) Header() http.Header {
 
 func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -3095,7 +3095,7 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header {
 
 func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -3269,7 +3269,7 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header {
 
 func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -3455,7 +3455,7 @@ func (c *BucketsDeleteCall) Header() http.Header {
 
 func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -3634,7 +3634,7 @@ func (c *BucketsGetCall) Header() http.Header {
 
 func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -3840,7 +3840,7 @@ func (c *BucketsGetIamPolicyCall) Header() http.Header {
 
 func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -4057,7 +4057,7 @@ func (c *BucketsInsertCall) Header() http.Header {
 
 func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -4314,7 +4314,7 @@ func (c *BucketsListCall) Header() http.Header {
 
 func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -4524,7 +4524,7 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header {
 
 func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -4759,7 +4759,7 @@ func (c *BucketsPatchCall) Header() http.Header {
 
 func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -4988,7 +4988,7 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header {
 
 func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -5163,7 +5163,7 @@ func (c *BucketsTestIamPermissionsCall) Header() http.Header {
 
 func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -5403,7 +5403,7 @@ func (c *BucketsUpdateCall) Header() http.Header {
 
 func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -5615,7 +5615,7 @@ func (c *ChannelsStopCall) Header() http.Header {
 
 func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -5732,7 +5732,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header {
 
 func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -5880,7 +5880,7 @@ func (c *DefaultObjectAccessControlsGetCall) Header() http.Header {
 
 func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6048,7 +6048,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header {
 
 func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6238,7 +6238,7 @@ func (c *DefaultObjectAccessControlsListCall) Header() http.Header {
 
 func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6411,7 +6411,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header {
 
 func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6585,7 +6585,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header {
 
 func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6757,7 +6757,7 @@ func (c *NotificationsDeleteCall) Header() http.Header {
 
 func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6905,7 +6905,7 @@ func (c *NotificationsGetCall) Header() http.Header {
 
 func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -7075,7 +7075,7 @@ func (c *NotificationsInsertCall) Header() http.Header {
 
 func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -7250,7 +7250,7 @@ func (c *NotificationsListCall) Header() http.Header {
 
 func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -7423,7 +7423,7 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header {
 
 func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -7595,7 +7595,7 @@ func (c *ObjectAccessControlsGetCall) Header() http.Header {
 
 func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -7786,7 +7786,7 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header {
 
 func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -7983,7 +7983,7 @@ func (c *ObjectAccessControlsListCall) Header() http.Header {
 
 func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -8168,7 +8168,7 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header {
 
 func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -8366,7 +8366,7 @@ func (c *ObjectAccessControlsUpdateCall) Header() http.Header {
 
 func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -8603,7 +8603,7 @@ func (c *ObjectsComposeCall) Header() http.Header {
 
 func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -8946,7 +8946,7 @@ func (c *ObjectsCopyCall) Header() http.Header {
 
 func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -9274,7 +9274,7 @@ func (c *ObjectsDeleteCall) Header() http.Header {
 
 func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -9507,7 +9507,7 @@ func (c *ObjectsGetCall) Header() http.Header {
 
 func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -9757,7 +9757,7 @@ func (c *ObjectsGetIamPolicyCall) Header() http.Header {
 
 func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -10074,7 +10074,7 @@ func (c *ObjectsInsertCall) Header() http.Header {
 
 func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -10447,7 +10447,7 @@ func (c *ObjectsListCall) Header() http.Header {
 
 func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -10764,7 +10764,7 @@ func (c *ObjectsPatchCall) Header() http.Header {
 
 func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -11156,7 +11156,7 @@ func (c *ObjectsRewriteCall) Header() http.Header {
 
 func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -11459,7 +11459,7 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header {
 
 func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -11659,7 +11659,7 @@ func (c *ObjectsTestIamPermissionsCall) Header() http.Header {
 
 func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -11920,7 +11920,7 @@ func (c *ObjectsUpdateCall) Header() http.Header {
 
 func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -12238,7 +12238,7 @@ func (c *ObjectsWatchAllCall) Header() http.Header {
 
 func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -12454,7 +12454,7 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header {
 
 func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -12604,7 +12604,7 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header {
 
 func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -12739,7 +12739,7 @@ func (c *ProjectsHmacKeysGetCall) Header() http.Header {
 
 func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -12939,7 +12939,7 @@ func (c *ProjectsHmacKeysListCall) Header() http.Header {
 
 func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -13134,7 +13134,7 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header {
 
 func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -13311,7 +13311,7 @@ func (c *ProjectsServiceAccountGetCall) Header() http.Header {
 
 func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13311,7 +13311,7 @@ func (c *ProjectsServiceAccountGetCall) Header() http.Header { func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210128") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index 8578cac9e..fcd0209fb 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -88,7 +88,7 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna } ts := creds.TokenSource - if settings.TokenSource != nil { + if settings.ImpersonationConfig == nil && settings.TokenSource != nil { ts = settings.TokenSource } trans = &oauth2.Transport{ diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index 3f661a787..1f0722f16 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -1,13 +1,13 @@ all: vet test testrace -build: deps +build: go build google.golang.org/grpc/... clean: go clean -i google.golang.org/grpc/... deps: - go get -d -v google.golang.org/grpc/... + GO111MODULE=on go get -d -v google.golang.org/grpc/... proto: @ if ! which protoc > /dev/null; then \ @@ -16,30 +16,18 @@ proto: fi go generate google.golang.org/grpc/... -test: testdeps +test: go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... -testsubmodule: testdeps +testsubmodule: cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... -testappengine: testappenginedeps - goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... - -testappenginedeps: - goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/... - -testdeps: - go get -d -v -t google.golang.org/grpc/... - -testrace: testdeps +testrace: go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... -updatedeps: - go get -d -v -u -f google.golang.org/grpc/... - -updatetestdeps: - go get -d -v -t -u -f google.golang.org/grpc/... +testdeps: + GO111MODULE=on go get -d -v -t google.golang.org/grpc/... vet: vetdeps ./vet.sh @@ -51,14 +39,10 @@ vetdeps: all \ build \ clean \ - deps \ proto \ test \ testappengine \ testappenginedeps \ - testdeps \ testrace \ - updatedeps \ - updatetestdeps \ vet \ vetdeps diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md new file mode 100644 index 000000000..be6e10870 --- /dev/null +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -0,0 +1,3 @@ +# Security Policy + +For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). 
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index 32d782f1c..e0d34288c 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -64,7 +64,7 @@ type baseBalancer struct {
 	csEvltr *balancer.ConnectivityStateEvaluator
 	state   connectivity.State
 
-	subConns map[resolver.Address]balancer.SubConn
+	subConns map[resolver.Address]balancer.SubConn // `attributes` is stripped from the keys of this map (the addresses)
 	scStates map[balancer.SubConn]connectivity.State
 	picker   balancer.Picker
 	config   Config
@@ -101,17 +101,41 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
 	// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
 	addrsSet := make(map[resolver.Address]struct{})
 	for _, a := range s.ResolverState.Addresses {
-		addrsSet[a] = struct{}{}
-		if _, ok := b.subConns[a]; !ok {
+		// Strip attributes from addresses before using them as map keys, so
+		// that two addresses which differ only in their attributes pointers
+		// (but carry the same attribute content) are considered the same
+		// address.
+		//
+		// Note that this doesn't handle the case where the attribute content is
+		// different. So if users want to set different attributes to create
+		// duplicate connections to the same backend, it doesn't work. This is
+		// fine for now, because duplicates are created by setting Metadata today.
+		//
+		// TODO: read attributes to handle duplicate connections.
+		aNoAttrs := a
+		aNoAttrs.Attributes = nil
+		addrsSet[aNoAttrs] = struct{}{}
+		if sc, ok := b.subConns[aNoAttrs]; !ok {
 			// a is a new address (not existing in b.subConns).
+			//
+			// When creating SubConn, the original address with attributes is
+			// passed through, so that connection configurations in attributes
+			// (like creds) will be used.
 			sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck})
 			if err != nil {
 				logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
 				continue
 			}
-			b.subConns[a] = sc
+			b.subConns[aNoAttrs] = sc
 			b.scStates[sc] = connectivity.Idle
 			sc.Connect()
+		} else {
+			// Always update the subconn's address in case the attributes
+			// changed.
+			//
+			// The SubConn does a reflect.DeepEqual of the new and old
+			// addresses. So this is a noop if the current address is the same
+			// as the old one (including attributes).
+			sc.UpdateAddresses([]resolver.Address{a})
 		}
 	}
 	for a, sc := range b.subConns {
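Illustration (not part of the patch): why stripping matters — two resolver.Address values with identical content but distinct *attributes.Attributes pointers are different map keys. A minimal sketch, assuming the grpc v1.35 attributes and resolver packages:

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

func main() {
	// Same address content, but two distinct Attributes pointers.
	a1 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("k", "v")}
	a2 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("k", "v")}

	m := map[resolver.Address]bool{a1: true}
	fmt.Println(m[a2]) // false: the pointers differ, so the keys differ

	// Stripping attributes, as UpdateClientConnState now does for its keys,
	// restores the intended equality.
	a1.Attributes, a2.Attributes = nil, nil
	m = map[resolver.Address]bool{a1: true}
	fmt.Println(m[a2]) // true
}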
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index b35ba94f2..77a08fd33 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -109,11 +109,11 @@ type defaultConfigSelector struct {
 	sc *ServiceConfig
 }
 
-func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) *iresolver.RPCConfig {
+func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
 	return &iresolver.RPCConfig{
 		Context:      rpcInfo.Context,
 		MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method),
-	}
+	}, nil
 }
 
 // DialContext creates a client connection to the given target. By default, it's
@@ -270,7 +270,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 		cc.authority = creds.Info().ServerName
 	} else if cc.dopts.insecure && cc.dopts.authority != "" {
 		cc.authority = cc.dopts.authority
-	} else if strings.HasPrefix(cc.target, "unix:") {
+	} else if strings.HasPrefix(cc.target, "unix:") || strings.HasPrefix(cc.target, "unix-abstract:") {
 		cc.authority = "localhost"
 	} else if strings.HasPrefix(cc.parsedTarget.Endpoint, ":") {
 		cc.authority = "localhost" + cc.parsedTarget.Endpoint
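Illustration (not part of the patch): the new scheme in use. Dialing an abstract-namespace Unix socket (Linux only) now gets a sane :authority; a minimal sketch against the v1.35 API (WithInsecure was current then):

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// "unix-abstract:server-socket" resolves to the abstract socket
	// "\x00server-socket" (see the unix resolver change further down);
	// per the hunk above, the :authority header becomes "localhost".
	conn, err := grpc.Dial("unix-abstract:server-socket", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}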
diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod
index 6feb697bb..cab74e557 100644
--- a/vendor/google.golang.org/grpc/go.mod
+++ b/vendor/google.golang.org/grpc/go.mod
@@ -3,8 +3,8 @@ module google.golang.org/grpc
 go 1.11
 
 require (
-	github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354
-	github.com/envoyproxy/go-control-plane v0.9.7
+	github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403
+	github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad
 	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
 	github.com/golang/protobuf v1.4.2
 	github.com/google/go-cmp v0.5.0
diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum
index 293f54903..77ee70b44 100644
--- a/vendor/google.golang.org/grpc/go.sum
+++ b/vendor/google.golang.org/grpc/go.sum
@@ -6,13 +6,14 @@ github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354 h1:9kRtNpqLHbZVO/NNxhHp2ymxFxsHOe3x2efJGn//Tas=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.7 h1:EARl0OvqMoxq/UMgMSCLnXzkaXbxzskluEBlMQCJPms=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad h1:EmNYJhPYy0pOFjCx2PrgtaBXmee0iUX9hLlxE1xHOJE=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
@@ -36,10 +37,13 @@ github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -87,7 +91,9 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/vendor/google.golang.org/grpc/internal/grpcutil/target.go
index baa3479f1..8833021da 100644
--- a/vendor/google.golang.org/grpc/internal/grpcutil/target.go
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/target.go
@@ -41,10 +41,30 @@ func split2(s, sep string) (string, string, bool) {
 // not parse "unix:[path]" cases. This should be true in cases where a custom
 // dialer is present, to prevent a behavior change.
 //
-// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
-// target}.
+// If target is not a valid scheme://authority/endpoint as specified in
+// https://github.com/grpc/grpc/blob/master/doc/naming.md,
+// it returns {Endpoint: target}.
 func ParseTarget(target string, skipUnixColonParsing bool) (ret resolver.Target) {
 	var ok bool
+	if strings.HasPrefix(target, "unix-abstract:") {
+		if strings.HasPrefix(target, "unix-abstract://") {
+			// Maybe, with Authority specified, try to parse it
+			var remain string
+			ret.Scheme, remain, _ = split2(target, "://")
+			ret.Authority, ret.Endpoint, ok = split2(remain, "/")
+			if !ok {
+				// No Authority, add the "//" back
+				ret.Endpoint = "//" + remain
+			} else {
+				// Found Authority, add the "/" back
+				ret.Endpoint = "/" + ret.Endpoint
+			}
+		} else {
+			// Without Authority specified, split target on ":"
+			ret.Scheme, ret.Endpoint, _ = split2(target, ":")
+		}
+		return ret
+	}
 	ret.Scheme, ret.Endpoint, ok = split2(target, "://")
 	if !ok {
 		if strings.HasPrefix(target, "unix:") && !skipUnixColonParsing {
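Illustration (not part of the patch): expected parses of the new scheme, with split2 and the unix-abstract branch transcribed from the hunk above so they can be exercised outside the internal package (split2 is assumed to split on the first separator, per its use here):

package main

import (
	"fmt"
	"strings"
)

// split2 splits s on the first occurrence of sep, reporting whether sep was found.
func split2(s, sep string) (string, string, bool) {
	spl := strings.SplitN(s, sep, 2)
	if len(spl) < 2 {
		return "", "", false
	}
	return spl[0], spl[1], true
}

type target struct{ Scheme, Authority, Endpoint string }

// parseUnixAbstract mirrors the unix-abstract branch of ParseTarget above.
func parseUnixAbstract(t string) (ret target) {
	if strings.HasPrefix(t, "unix-abstract://") {
		var remain string
		var ok bool
		ret.Scheme, remain, _ = split2(t, "://")
		ret.Authority, ret.Endpoint, ok = split2(remain, "/")
		if !ok {
			ret.Endpoint = "//" + remain // no authority: put the "//" back
		} else {
			ret.Endpoint = "/" + ret.Endpoint // authority found: put the "/" back
		}
		return ret
	}
	ret.Scheme, ret.Endpoint, _ = split2(t, ":")
	return ret
}

func main() {
	fmt.Printf("%+v\n", parseUnixAbstract("unix-abstract:abc/def"))     // {Scheme:unix-abstract Authority: Endpoint:abc/def}
	fmt.Printf("%+v\n", parseUnixAbstract("unix-abstract://host/path")) // {Scheme:unix-abstract Authority:host Endpoint:/path}
}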
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index f83d66259..1e2834c70 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -60,7 +60,11 @@ var (
 	// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
 	// stored in the passed in attributes. This is set by
 	// credentials/xds/xds.go.
-	GetXDSHandshakeInfoForTesting interface{} // func (attr *attributes.Attributes) *xds.HandshakeInfo
+	GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo
+	// GetServerCredentials returns the transport credentials configured on a
+	// gRPC server. An xDS-enabled server needs to know what type of credentials
+	// is configured on the underlying gRPC server. This is set by server.go.
+	GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
 )
 
 // HealthChecker defines the signature of the client-side LB channel health checking function.
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
new file mode 100644
index 000000000..302262613
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
@@ -0,0 +1,50 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata contains functions to set and get metadata from addresses.
+//
+// This package is experimental.
+package metadata
+
+import (
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+)
+
+type mdKeyType string
+
+const mdKey = mdKeyType("grpc.internal.address.metadata")
+
+// Get returns the metadata of addr.
+func Get(addr resolver.Address) metadata.MD {
+	attrs := addr.Attributes
+	if attrs == nil {
+		return nil
+	}
+	md, _ := attrs.Value(mdKey).(metadata.MD)
+	return md
+}
+
+// Set sets (overrides) the metadata in addr.
+//
+// When a SubConn is created with this address, the RPCs sent on it will all
+// have this metadata.
+func Set(addr resolver.Address, md metadata.MD) resolver.Address {
+	addr.Attributes = addr.Attributes.WithValues(mdKey, md)
+	return addr
+}
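Illustration (not part of the patch): how a resolver might attach per-address metadata with the new helpers; Set stores the MD in the address attributes and Get retrieves it. Hypothetical usage (the package is internal, so this only compiles within the grpc module):

package main

import (
	"fmt"

	imetadata "google.golang.org/grpc/internal/metadata"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/resolver"
)

func main() {
	addr := resolver.Address{Addr: "10.0.0.1:443"}
	// Every RPC on a SubConn built from this address will carry these
	// headers (see the http2_client change further down).
	addr = imetadata.Set(addr, metadata.Pairs("x-tenant", "123"))
	fmt.Println(imetadata.Get(addr)) // map[x-tenant:[123]]
}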
diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
index 5ef9262e5..e69900400 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
@@ -29,8 +29,10 @@ import (
 
 // ConfigSelector controls what configuration to use for every RPC.
 type ConfigSelector interface {
-	// Selects the configuration for the RPC.
-	SelectConfig(RPCInfo) *RPCConfig
+	// Selects the configuration for the RPC, or terminates it using the error.
+	// This error will be converted by the gRPC library to a status error with
+	// code UNKNOWN if it is not returned as a status error.
+	SelectConfig(RPCInfo) (*RPCConfig, error)
 }
 
 // RPCInfo contains RPC information needed by a ConfigSelector.
@@ -86,7 +88,7 @@ func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) {
 }
 
 // SelectConfig defers to the current ConfigSelector in scs.
-func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) *RPCConfig {
+func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) {
 	scs.mu.RLock()
 	defer scs.mu.RUnlock()
 	return scs.cs.SelectConfig(r)
diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
index 8e78d4719..0d5a811dd 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
@@ -26,20 +26,28 @@ import (
 	"google.golang.org/grpc/resolver"
 )
 
-const scheme = "unix"
+const unixScheme = "unix"
+const unixAbstractScheme = "unix-abstract"
 
-type builder struct{}
+type builder struct {
+	scheme string
+}
 
-func (*builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
+func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
 	if target.Authority != "" {
 		return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority)
 	}
-	cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(resolver.Address{Addr: target.Endpoint}, "unix")}})
+	addr := resolver.Address{Addr: target.Endpoint}
+	if b.scheme == unixAbstractScheme {
+		// prepend "\x00" to address for unix-abstract
+		addr.Addr = "\x00" + addr.Addr
+	}
+	cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}})
 	return &nopResolver{}, nil
 }
 
-func (*builder) Scheme() string {
-	return scheme
+func (b *builder) Scheme() string {
+	return b.scheme
 }
 
 type nopResolver struct {
@@ -50,5 +58,6 @@ func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
 func (*nopResolver) Close() {}
 
 func init() {
-	resolver.Register(&builder{})
+	resolver.Register(&builder{scheme: unixScheme})
+	resolver.Register(&builder{scheme: unixAbstractScheme})
 }
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
index 2fdcb76e6..4b2964f2a 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
@@ -44,25 +44,23 @@ func GetCPUTime() int64 {
 }
 
 // Rusage is an alias for syscall.Rusage under linux environment.
-type Rusage syscall.Rusage
+type Rusage = syscall.Rusage
 
 // GetRusage returns the resource usage of current process.
-func GetRusage() (rusage *Rusage) {
-	rusage = new(Rusage)
-	syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage))
-	return
+func GetRusage() *Rusage {
+	rusage := new(Rusage)
+	syscall.Getrusage(syscall.RUSAGE_SELF, rusage)
+	return rusage
 }
 
 // CPUTimeDiff returns the differences of user CPU time and system CPU time used
 // between two Rusage structs.
 func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
-	f := (*syscall.Rusage)(first)
-	l := (*syscall.Rusage)(latest)
 	var (
-		utimeDiffs  = l.Utime.Sec - f.Utime.Sec
-		utimeDiffus = l.Utime.Usec - f.Utime.Usec
-		stimeDiffs  = l.Stime.Sec - f.Stime.Sec
-		stimeDiffus = l.Stime.Usec - f.Stime.Usec
+		utimeDiffs  = latest.Utime.Sec - first.Utime.Sec
+		utimeDiffus = latest.Utime.Usec - first.Utime.Usec
+		stimeDiffs  = latest.Stime.Sec - first.Stime.Sec
+		stimeDiffus = latest.Stime.Usec - first.Stime.Usec
 	)
 	uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
index adae60d65..7913ef1db 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -50,7 +50,7 @@ func GetCPUTime() int64 {
 type Rusage struct{}
 
 // GetRusage is a no-op function under non-linux or appengine environment.
-func GetRusage() (rusage *Rusage) {
+func GetRusage() *Rusage {
 	log()
 	return nil
 }
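Illustration (not part of the patch): with Rusage now a type alias, *Rusage and *syscall.Rusage are interchangeable, which is what lets the casts above disappear. A minimal sketch of measuring CPU time with this internal package (Linux only; compiles only within the grpc module):

// +build linux

package main

import (
	"fmt"

	isyscall "google.golang.org/grpc/internal/syscall"
)

func main() {
	before := isyscall.GetRusage()
	// ... do some CPU-bound work here ...
	after := isyscall.GetRusage()
	utime, stime := isyscall.CPUTimeDiff(before, after)
	fmt.Printf("user: %.6fs system: %.6fs\n", utime, stime)
}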
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 1355008c7..8902b7f90 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -33,6 +33,7 @@ import (
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
 	"google.golang.org/grpc/internal/grpcutil"
+	imetadata "google.golang.org/grpc/internal/metadata"
 	"google.golang.org/grpc/internal/transport/networktype"
 
 	"google.golang.org/grpc/codes"
@@ -60,7 +61,7 @@ type http2Client struct {
 	cancel     context.CancelFunc
 	ctxDone    <-chan struct{} // Cache the ctx.Done() chan.
 	userAgent  string
-	md         interface{}
+	md         metadata.MD
 	conn       net.Conn // underlying communication channel
 	loopy      *loopyWriter
 	remoteAddr net.Addr
@@ -142,7 +143,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error
 	address := addr.Addr
 	networkType, ok := networktype.Get(addr)
 	if fn != nil {
-		if networkType == "unix" {
+		if networkType == "unix" && !strings.HasPrefix(address, "\x00") {
 			// For backward compatibility, if the user dialed "unix:///path",
 			// the passthrough resolver would be used and the user's custom
 			// dialer would see "unix:///path". Since the unix resolver is used
@@ -277,7 +278,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
 		ctxDone:               ctx.Done(), // Cache Done chan.
 		cancel:                cancel,
 		userAgent:             opts.UserAgent,
-		md:                    addr.Metadata,
 		conn:                  conn,
 		remoteAddr:            conn.RemoteAddr(),
 		localAddr:             conn.LocalAddr(),
@@ -305,6 +305,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
 		keepaliveEnabled:      keepaliveEnabled,
 		bufferPool:            newBufferPool(),
 	}
+
+	if md, ok := addr.Metadata.(*metadata.MD); ok {
+		t.md = *md
+	} else if md := imetadata.Get(addr); md != nil {
+		t.md = md
+	}
 	t.controlBuf = newControlBuffer(t.ctxDone)
 	if opts.InitialWindowSize >= defaultWindowSize {
 		t.initialWindowSize = opts.InitialWindowSize
@@ -521,14 +527,12 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
 			}
 		}
 	}
-	if md, ok := t.md.(*metadata.MD); ok {
-		for k, vv := range *md {
-			if isReservedHeader(k) {
-				continue
-			}
-			for _, v := range vv {
-				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
-			}
+	for k, vv := range t.md {
+		if isReservedHeader(k) {
+			continue
+		}
+		for _, v := range vv {
+			headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
 		}
 	}
 	return headerFields, nil
diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh
index 3443ad975..ed52187df 100644
--- a/vendor/google.golang.org/grpc/regenerate.sh
+++ b/vendor/google.golang.org/grpc/regenerate.sh
@@ -45,19 +45,6 @@ mkdir -p ${WORKDIR}/googleapis/google/rpc
 echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto"
 curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto
 
-# Pull in the following repos to build the MeshCA config proto.
-ENVOY_API_REPOS=(
-  "https://github.com/envoyproxy/data-plane-api"
-  "https://github.com/cncf/udpa"
-  "https://github.com/envoyproxy/protoc-gen-validate"
-)
-for repo in ${ENVOY_API_REPOS[@]}; do
-  dirname=$(basename ${repo})
-  mkdir -p ${WORKDIR}/${dirname}
-  echo "git clone ${repo}"
-  git clone --quiet ${repo} ${WORKDIR}/${dirname}
-done
-
 # Pull in the MeshCA service proto.
 mkdir -p ${WORKDIR}/istio/istio/google/security/meshca/v1
 echo "curl https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto"
@@ -84,15 +71,15 @@ SOURCES=(
   ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto
   ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto
   ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto
-  ${WORKDIR}/grpc-proto/grpc/tls/provider/meshca/experimental/config.proto
+  ${WORKDIR}/grpc-proto/grpc/testing/*.proto
+  ${WORKDIR}/grpc-proto/grpc/core/*.proto
   ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto
 )
 
 # These options of the form 'Mfoo.proto=bar' instruct the codegen to use an
 # import path of 'bar' in the generated code when 'foo.proto' is imported in
 # one of the sources.
-OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\
-Menvoy/config/core/v3/config_source.proto=github.com/envoyproxy/go-control-plane/envoy/config/core/v3
+OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core
 
 for src in ${SOURCES[@]}; do
   echo "protoc ${src}"
@@ -100,9 +87,6 @@ for src in ${SOURCES[@]}; do
     -I"." \
     -I${WORKDIR}/grpc-proto \
     -I${WORKDIR}/googleapis \
-    -I${WORKDIR}/data-plane-api \
-    -I${WORKDIR}/udpa \
-    -I${WORKDIR}/protoc-gen-validate \
     -I${WORKDIR}/istio \
     ${src}
 done
@@ -113,9 +97,6 @@ for src in ${LEGACY_SOURCES[@]}; do
     -I"." \
     -I${WORKDIR}/grpc-proto \
     -I${WORKDIR}/googleapis \
-    -I${WORKDIR}/data-plane-api \
-    -I${WORKDIR}/udpa \
-    -I${WORKDIR}/protoc-gen-validate \
     -I${WORKDIR}/istio \
     ${src}
 done
@@ -132,6 +113,10 @@ rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go
 # grpc/service_config/service_config.proto does not have a go_package option.
 mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config
 
+# grpc/testing does not have a go_package option.
+mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/
+mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/
+
 # istio/google/security/meshca/v1/meshca.proto does not have a go_package option.
 mkdir -p ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/
 mv ${WORKDIR}/out/istio/google/security/meshca/v1/* ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 968eb598e..7a2aa28a1 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -40,6 +40,7 @@ import (
 	"google.golang.org/grpc/encoding"
 	"google.golang.org/grpc/encoding/proto"
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/binarylog"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcrand"
@@ -58,6 +59,12 @@ const (
 	defaultServerMaxSendMessageSize = math.MaxInt32
 )
 
+func init() {
+	internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials {
+		return srv.opts.creds
+	}
+}
+
 var statusOK = status.New(codes.OK, "")
 var logger = grpclog.Component("core")
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 8d4694a33..eda1248d6 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -175,7 +175,10 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 	var mc serviceconfig.MethodConfig
 	var onCommit func()
-	rpcConfig := cc.safeConfigSelector.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: method})
+	rpcConfig, err := cc.safeConfigSelector.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: method})
+	if err != nil {
+		return nil, status.Convert(err).Err()
+	}
 	if rpcConfig != nil {
 		if rpcConfig.Context != nil {
 			ctx = rpcConfig.Context
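Illustration (not part of the patch): a ConfigSelector can now fail an RPC before it starts; newClientStream converts the returned error with status.Convert, so a non-status error surfaces as codes.Unknown. A hypothetical selector against the updated interface (internal package, so only compilable within the grpc module):

package example

import (
	"google.golang.org/grpc/codes"
	iresolver "google.golang.org/grpc/internal/resolver"
	"google.golang.org/grpc/status"
)

// denyListSelector is a hypothetical ConfigSelector that terminates RPCs to
// blocked methods instead of returning a config.
type denyListSelector struct {
	blocked map[string]bool
}

func (d *denyListSelector) SelectConfig(info iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
	if d.blocked[info.Method] {
		// Returned to the caller as-is; a plain error would become codes.Unknown.
		return nil, status.Errorf(codes.PermissionDenied, "method %q is blocked", info.Method)
	}
	return &iresolver.RPCConfig{Context: info.Context}, nil
}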
-const Version = "1.34.1" +const Version = "1.35.0" diff --git a/vendor/modules.txt b/vendor/modules.txt index d13bfe521..73931b2bf 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -19,7 +19,9 @@ github.com/VictoriaMetrics/metrics # github.com/VictoriaMetrics/metricsql v0.10.0 github.com/VictoriaMetrics/metricsql github.com/VictoriaMetrics/metricsql/binaryop -# github.com/aws/aws-sdk-go v1.36.25 +# github.com/VividCortex/ewma v1.1.1 +github.com/VividCortex/ewma +# github.com/aws/aws-sdk-go v1.37.1 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn github.com/aws/aws-sdk-go/aws/awserr @@ -31,6 +33,7 @@ github.com/aws/aws-sdk-go/aws/credentials github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds github.com/aws/aws-sdk-go/aws/credentials/endpointcreds github.com/aws/aws-sdk-go/aws/credentials/processcreds +github.com/aws/aws-sdk-go/aws/credentials/ssocreds github.com/aws/aws-sdk-go/aws/credentials/stscreds github.com/aws/aws-sdk-go/aws/csm github.com/aws/aws-sdk-go/aws/defaults @@ -56,20 +59,31 @@ github.com/aws/aws-sdk-go/private/protocol github.com/aws/aws-sdk-go/private/protocol/eventstream github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi github.com/aws/aws-sdk-go/private/protocol/json/jsonutil +github.com/aws/aws-sdk-go/private/protocol/jsonrpc github.com/aws/aws-sdk-go/private/protocol/query github.com/aws/aws-sdk-go/private/protocol/query/queryutil github.com/aws/aws-sdk-go/private/protocol/rest +github.com/aws/aws-sdk-go/private/protocol/restjson github.com/aws/aws-sdk-go/private/protocol/restxml github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil github.com/aws/aws-sdk-go/service/s3 github.com/aws/aws-sdk-go/service/s3/s3iface github.com/aws/aws-sdk-go/service/s3/s3manager +github.com/aws/aws-sdk-go/service/sso +github.com/aws/aws-sdk-go/service/sso/ssoiface github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/beorn7/perks v1.0.1 github.com/beorn7/perks/quantile # github.com/cespare/xxhash/v2 v2.1.1 github.com/cespare/xxhash/v2 +# github.com/cheggaaa/pb/v3 v3.0.5 +github.com/cheggaaa/pb/v3 +github.com/cheggaaa/pb/v3/termutil +# github.com/cpuguy83/go-md2man/v2 v2.0.0 +github.com/cpuguy83/go-md2man/v2/md2man +# github.com/fatih/color v1.10.0 +github.com/fatih/color # github.com/go-kit/kit v0.10.0 github.com/go-kit/kit/log github.com/go-kit/kit/log/level @@ -89,13 +103,17 @@ github.com/golang/protobuf/ptypes/timestamp github.com/golang/snappy # github.com/googleapis/gax-go/v2 v2.0.5 github.com/googleapis/gax-go/v2 +# github.com/influxdata/influxdb v1.8.4 +github.com/influxdata/influxdb/client/v2 +github.com/influxdata/influxdb/models +github.com/influxdata/influxdb/pkg/escape # github.com/jmespath/go-jmespath v0.4.0 github.com/jmespath/go-jmespath # github.com/jstemmer/go-junit-report v0.9.1 github.com/jstemmer/go-junit-report github.com/jstemmer/go-junit-report/formatter github.com/jstemmer/go-junit-report/parser -# github.com/klauspost/compress v1.11.6 +# github.com/klauspost/compress v1.11.7 github.com/klauspost/compress/flate github.com/klauspost/compress/fse github.com/klauspost/compress/gzip @@ -104,6 +122,12 @@ github.com/klauspost/compress/snappy github.com/klauspost/compress/zlib github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash +# github.com/mattn/go-colorable v0.1.8 +github.com/mattn/go-colorable +# github.com/mattn/go-isatty v0.0.12 +github.com/mattn/go-isatty +# github.com/mattn/go-runewidth v0.0.10 +github.com/mattn/go-runewidth # 
github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/oklog/ulid v1.3.1 @@ -139,6 +163,12 @@ github.com/prometheus/prometheus/tsdb/record github.com/prometheus/prometheus/tsdb/tombstones github.com/prometheus/prometheus/tsdb/tsdbutil github.com/prometheus/prometheus/tsdb/wal +# github.com/rivo/uniseg v0.2.0 +github.com/rivo/uniseg +# github.com/russross/blackfriday/v2 v2.1.0 +github.com/russross/blackfriday/v2 +# github.com/urfave/cli/v2 v2.3.0 +github.com/urfave/cli/v2 # github.com/valyala/bytebufferpool v1.0.0 github.com/valyala/bytebufferpool # github.com/valyala/fastjson v1.6.3 @@ -154,7 +184,7 @@ github.com/valyala/gozstd github.com/valyala/histogram # github.com/valyala/quicktemplate v1.6.3 github.com/valyala/quicktemplate -# go.opencensus.io v0.22.5 +# go.opencensus.io v0.22.6 go.opencensus.io go.opencensus.io/internal go.opencensus.io/internal/tagencoding @@ -176,10 +206,10 @@ go.uber.org/atomic # golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 golang.org/x/lint golang.org/x/lint/golint -# golang.org/x/mod v0.4.0 +# golang.org/x/mod v0.4.1 golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.0.0-20201224014010-6772e930b67b +# golang.org/x/net v0.0.0-20210119194325-5f4716e94777 golang.org/x/net/context golang.org/x/net/context/ctxhttp golang.org/x/net/http/httpguts @@ -188,7 +218,7 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/oauth2 v0.0.0-20210112200429-01de73cf58bd +# golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c golang.org/x/oauth2 golang.org/x/oauth2/google golang.org/x/oauth2/google/internal/externalaccount @@ -197,7 +227,8 @@ golang.org/x/oauth2/jws golang.org/x/oauth2/jwt # golang.org/x/sync v0.0.0-20201207232520-09787c993a3a golang.org/x/sync/errgroup -# golang.org/x/sys v0.0.0-20210113000019-eaf3bda374d2 +# golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c +golang.org/x/sys/execabs golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix golang.org/x/sys/windows @@ -206,7 +237,7 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064 +# golang.org/x/tools v0.1.0 golang.org/x/tools/cmd/goimports golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/gcexportdata @@ -222,7 +253,7 @@ golang.org/x/tools/internal/imports # golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/api v0.36.0 +# google.golang.org/api v0.38.0 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport google.golang.org/api/internal @@ -248,13 +279,13 @@ google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20210111234610-22ae2b108f89 +# google.golang.org/genproto v0.0.0-20210201151548-94839c025ad4 google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/iam/v1 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/expr -# google.golang.org/grpc v1.34.1 +# google.golang.org/grpc v1.35.0 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -281,6 +312,7 @@ 
google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/passthrough