Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Commit 9682c23786: 104 changed files with 8637 additions and 5081 deletions.
.github/workflows/main.yml (vendored): 2 changes

@@ -29,8 +29,6 @@ jobs:
         make install-errcheck
         make install-golangci-lint
     - name: Build
-      env:
-        GO111MODULE: on
      run: |
        export PATH=$PATH:$(go env GOPATH)/bin # temporary fix. See https://github.com/actions/setup-go/issues/14
        make check-all
Makefile: 17 changes

@@ -170,7 +170,6 @@ publish-release:
 	git checkout $(TAG)-cluster && $(MAKE) release publish && \
 	git checkout $(TAG)-enterprise && $(MAKE) release publish && \
 	git checkout $(TAG)-enterprise-cluster && $(MAKE) release publish && \
-	git checkout $(TAG) && git push $(TAG) && git push $(TAG)-cluster && \
 	$(MAKE) github-create-release && \
 	$(MAKE) github-upload-assets

@@ -322,7 +321,7 @@ lint: install-golint
 	golint app/...

 install-golint:
-	which golint || GO111MODULE=off go get golang.org/x/lint/golint
+	which golint || go install golang.org/x/lint/golint@latest

 errcheck: install-errcheck
 	errcheck -exclude=errcheck_excludes.txt ./lib/...

@@ -337,9 +336,9 @@ errcheck: install-errcheck
 	errcheck -exclude=errcheck_excludes.txt ./app/vmctl/...

 install-errcheck:
-	which errcheck || GO111MODULE=off go get github.com/kisielk/errcheck
+	which errcheck || go install github.com/kisielk/errcheck@latest

-check-all: fmt vet lint errcheck golangci-lint
+check-all: fmt vet lint errcheck golangci-lint govulncheck

 test:
 	go test ./lib/... ./app/...

@@ -386,7 +385,7 @@ quicktemplate-gen: install-qtc
 	qtc

 install-qtc:
-	which qtc || GO111MODULE=off go get github.com/valyala/quicktemplate/qtc
+	which qtc || go install github.com/valyala/quicktemplate/qtc@latest

 golangci-lint: install-golangci-lint

@@ -395,8 +394,14 @@ golangci-lint: install-golangci-lint
 install-golangci-lint:
 	which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.48.0

+govulncheck: install-govulncheck
+	govulncheck ./...
+
+install-govulncheck:
+	which govulncheck || go install golang.org/x/vuln/cmd/govulncheck@latest
+
 install-wwhrd:
-	which wwhrd || GO111MODULE=off go get github.com/frapposelli/wwhrd
+	which wwhrd || go install github.com/frapposelli/wwhrd@latest

 check-licenses: install-wwhrd
 	wwhrd check -f .wwhrd.yml
README.md: 67 changes

@@ -290,6 +290,9 @@ VictoriaMetrics provides an ability to explore time series cardinality at `cardi
 - To identify values with the highest number of series for the selected label (aka `focusLabel`).
 - To identify label=name pairs with the highest number of series.
 - To identify labels with the highest number of unique values.
+Note that [cluster version of VictoriaMetrics](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html)
+may show lower than expected number of unique label values for labels with small number of unique values.
+This is because of [implementation limits](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/5a6e617b5e41c9170e7c562aecd15ee0c901d489/app/vmselect/netstorage/netstorage.go#L1039-L1045).

 By default cardinality explorer analyzes time series for the current date. It provides the ability to select different day at the top right corner.
 By default all the time series for the selected date are analyzed. It is possible to narrow down the analysis to series

@@ -326,11 +329,19 @@ See also [vmagent](https://docs.victoriametrics.com/vmagent.html), which can be
 VictoriaMetrics accepts data from [DataDog agent](https://docs.datadoghq.com/agent/) or [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/) via ["submit metrics" API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics) at `/datadog/api/v1/series` path.

-Run DataDog agent with `DD_DD_URL=http://victoriametrics-host:8428/datadog` environment variable in order to write data to VictoriaMetrics at `victoriametrics-host` host. Another option is to set `dd_url` param at [DataDog agent configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files/) to `http://victoriametrics-host:8428/datadog`.
+Single-node VictoriaMetrics:
+
+Run DataDog agent with environment variable `DD_DD_URL=http://victoriametrics-host:8428/datadog`. Alternatively, set `dd_url` param at [DataDog agent configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files/) to `http://victoriametrics-host:8428/datadog`.
+
+Cluster version of VictoriaMetrics:
+
+Run DataDog agent with environment variable `DD_DD_URL=http://vinsert-host:8480/insert/0/datadog`. Alternatively, set `dd_url` param at [DataDog agent configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files/) to `DD_DD_URL=http://vinsert-host:8480/insert/0/datadog`.

 VictoriaMetrics doesn't check `DD_API_KEY` param, so it can be set to arbitrary value.

-Example on how to send data to VictoriaMetrics via DataDog "submit metrics" API from command line:
+Example of how to send data to VictoriaMetrics via [DataDog "submit metrics"](https://docs.victoriametrics.com/url-examples.html#datadogapiv1series) from command line:
+
+Single-node VictoriaMetrics:

 ```console
 echo '

@@ -351,15 +362,56 @@ echo '
 }
 ]
 }
-' | curl -X POST --data-binary @- http://localhost:8428/datadog/api/v1/series
+' | curl -X POST --data-binary @- http://victoriametrics-host:8428/datadog/api/v1/series
 ```

-The imported data can be read via [export API](https://docs.victoriametrics.com/#how-to-export-data-in-json-line-format):
+Cluster version of VictoriaMetrics:

 <div class="with-copy" markdown="1">

 ```console
-curl http://localhost:8428/api/v1/export -d 'match[]=system.load.1'
+echo '
+{
+  "series": [
+    {
+      "host": "test.example.com",
+      "interval": 20,
+      "metric": "system.load.1",
+      "points": [[
+        0,
+        0.5
+      ]],
+      "tags": [
+        "environment:test"
+      ],
+      "type": "rate"
+    }
+  ]
+}
+' | curl -X POST --data-binary @- http://vminsert-host:8480/insert/0/datadog/api/v1/series
+```
+
+</div>
+
+The imported data can be read via [export API](https://docs.victoriametrics.com/url-examples.html#apiv1export):
+
+Single-node VictoriaMetrics:
+
+<div class="with-copy" markdown="1">
+
+```console
+curl http://victoriametrics-host:8428/api/v1/export -d 'match[]=system.load.1'
+```
+
+</div>
+
+Cluster version of VictoriaMetrics:
+
+<div class="with-copy" markdown="1">
+
+```console
+curl http://vmselect-host:8481/select/0/prometheus/api/v1/export -d 'match[]=system.load.1'
 ```

 </div>

@@ -638,7 +690,7 @@ VictoriaMetrics accepts `round_digits` query arg for `/api/v1/query` and `/api/v
 VictoriaMetrics accepts `limit` query arg for `/api/v1/labels` and `/api/v1/label/<labelName>/values` handlers for limiting the number of returned entries. For example, the query to `/api/v1/labels?limit=5` returns a sample of up to 5 unique labels, while ignoring the rest of labels. If the provided `limit` value exceeds the corresponding `-search.maxTagKeys` / `-search.maxTagValues` command-line flag values, then limits specified in the command-line flags are used.

-By default, VictoriaMetrics returns time series for the last 5 minutes from `/api/v1/series`, while the Prometheus API defaults to all time. Use `start` and `end` to select a different time range.
+By default, VictoriaMetrics returns time series for the last 5 minutes from `/api/v1/series`, `/api/v1/labels` and `/api/v1/label/<labelName>/values` while the Prometheus API defaults to all time. Explicitly set `start` and `end` to select the desired time range.
 VictoriaMetrics accepts `limit` query arg for `/api/v1/series` handlers for limiting the number of returned entries. For example, the query to `/api/v1/series?limit=5` returns a sample of up to 5 series, while ignoring the rest. If the provided `limit` value exceeds the corresponding `-search.maxSeries` command-line flag values, then limits specified in the command-line flags are used.

 Additionally, VictoriaMetrics provides the following handlers:

@@ -1573,7 +1625,8 @@ The exceeded limits can be [monitored](#monitoring) with the following metrics:
 These limits are approximate, so VictoriaMetrics can underflow/overflow the limit by a small percentage (usually less than 1%).

-See also more advanced [cardinality limiter in vmagent](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter).
+See also more advanced [cardinality limiter in vmagent](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter)
+and [cardinality explorer docs](#cardinality-explorer).

 ## Troubleshooting
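A quick, illustrative way to exercise the `/api/v1/labels` behaviour documented in the README hunk above; the host, time range and limit are made-up values, shown only to demonstrate the query args rather than taken from the diff:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Ask for at most 5 label names over an explicit one-hour window.
	// Without start/end the server now defaults to the last 5 minutes.
	params := url.Values{}
	params.Set("limit", "5")
	params.Set("start", "2022-09-01T00:00:00Z")
	params.Set("end", "2022-09-01T01:00:00Z")

	resp, err := http.Get("http://victoriametrics-host:8428/api/v1/labels?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```

The same `limit`, `start` and `end` args apply to `/api/v1/label/<labelName>/values` and `/api/v1/series` per the README text above.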
@@ -625,6 +625,8 @@ Both limits can be set simultaneously. If any of these limits is reached, then s
 These limits are approximate, so `vmagent` can underflow/overflow the limit by a small percentage (usually less than 1%).

+See also [cardinality explorer docs](https://docs.victoriametrics.com/#cardinality-explorer).
+
 ## Monitoring

 `vmagent` exports various metrics in Prometheus exposition format at `http://vmagent-host:8429/metrics` page. We recommend setting up regular scraping of this page
@@ -521,6 +521,74 @@ To avoid such situation try to filter out VM process metrics via `--vm-native-fi
    Instead, use [relabeling in VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl/issues/4#issuecomment-683424375).
 5. When importing in or from cluster version remember to use correct [URL format](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)
    and specify `accountID` param.
+6. When migrating large volumes of data it might be useful to use `--vm-native-step-interval` flag to split single process into smaller steps.
+
+#### Using time-based chunking of migration
+
+It is possible split migration process into set of smaller batches based on time. This is especially useful when migrating large volumes of data as this adds indication of progress and ability to restore process from certain point in case of failure.
+
+To use this you need to specify `--vm-native-step-interval` flag. Supported values are: `month`, `day`, `hour`.
+Note that in order to use this it is required `--vm-native-filter-time-start` to be set to calculate time ranges for export process.
+
+Every range is being processed independently, which means that:
+- after range processing is finished all data within range is migrated
+- if process fails on one of stages it is guaranteed that data of prior stages is already written, so it is possible to restart process starting from failed range
+
+It is recommended using the `month` step when migrating the data over multiple months, since the migration with `day` and `hour` steps may take longer time to complete
+because of additional overhead.
+
+Usage example:
+```console
+./vmctl vm-native
+  --vm-native-filter-time-start 2022-06-17T00:07:00Z \
+  --vm-native-filter-time-end 2022-10-03T00:07:00Z \
+  --vm-native-src-addr http://localhost:8428 \
+  --vm-native-dst-addr http://localhost:8528 \
+  --vm-native-step-interval=month
+VictoriaMetrics Native import mode
+2022/08/30 19:48:24 Processing range 1/5: 2022-06-17T00:07:00Z - 2022-06-30T23:59:59Z
+2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
+	filter: match[]={__name__!=""}
+	start: 2022-06-17T00:07:00Z
+	end: 2022-06-30T23:59:59Z
+Initing import process to "http://localhost:8428":
+2022/08/30 19:48:24 Import finished!
+Total: 16 B ↗ Speed: 28.89 KiB p/s
+2022/08/30 19:48:24 Processing range 2/5: 2022-07-01T00:00:00Z - 2022-07-31T23:59:59Z
+2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
+	filter: match[]={__name__!=""}
+	start: 2022-07-01T00:00:00Z
+	end: 2022-07-31T23:59:59Z
+Initing import process to "http://localhost:8428":
+2022/08/30 19:48:24 Import finished!
+Total: 16 B ↗ Speed: 164.35 KiB p/s
+2022/08/30 19:48:24 Processing range 3/5: 2022-08-01T00:00:00Z - 2022-08-31T23:59:59Z
+2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
+	filter: match[]={__name__!=""}
+	start: 2022-08-01T00:00:00Z
+	end: 2022-08-31T23:59:59Z
+Initing import process to "http://localhost:8428":
+2022/08/30 19:48:24 Import finished!
+Total: 16 B ↗ Speed: 191.42 KiB p/s
+2022/08/30 19:48:24 Processing range 4/5: 2022-09-01T00:00:00Z - 2022-09-30T23:59:59Z
+2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
+	filter: match[]={__name__!=""}
+	start: 2022-09-01T00:00:00Z
+	end: 2022-09-30T23:59:59Z
+Initing import process to "http://localhost:8428":
+2022/08/30 19:48:24 Import finished!
+Total: 16 B ↗ Speed: 141.04 KiB p/s
+2022/08/30 19:48:24 Processing range 5/5: 2022-10-01T00:00:00Z - 2022-10-03T00:07:00Z
+2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
+	filter: match[]={__name__!=""}
+	start: 2022-10-01T00:00:00Z
+	end: 2022-10-03T00:07:00Z
+Initing import process to "http://localhost:8428":
+2022/08/30 19:48:24 Import finished!
+Total: 16 B ↗ Speed: 186.32 KiB p/s
+2022/08/30 19:48:24 Total time: 12.680582ms
+```
+
 ## Verifying exported blocks from VictoriaMetrics
@@ -4,6 +4,8 @@ import (
 	"fmt"

 	"github.com/urfave/cli/v2"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
 )

 const (

@@ -318,6 +320,7 @@ const (
 	vmNativeFilterMatch     = "vm-native-filter-match"
 	vmNativeFilterTimeStart = "vm-native-filter-time-start"
 	vmNativeFilterTimeEnd   = "vm-native-filter-time-end"
+	vmNativeStepInterval    = "vm-native-step-interval"

 	vmNativeSrcAddr = "vm-native-src-addr"
 	vmNativeSrcUser = "vm-native-src-user"

@@ -345,6 +348,10 @@ var (
 			Name:  vmNativeFilterTimeEnd,
 			Usage: "The time filter may contain either unix timestamp in seconds or RFC3339 values. E.g. '2020-01-01T20:07:00Z'",
 		},
+		&cli.StringFlag{
+			Name:  vmNativeStepInterval,
+			Usage: fmt.Sprintf("Split export data into chunks. Requires setting --%s. Valid values are '%s','%s','%s'.", vmNativeFilterTimeStart, stepper.StepMonth, stepper.StepDay, stepper.StepHour),
+		},
 		&cli.StringFlag{
 			Name: vmNativeSrcAddr,
 			Usage: "VictoriaMetrics address to perform export from. \n" +
@@ -11,6 +11,8 @@ import (
 	"syscall"
 	"time"

+	"github.com/urfave/cli/v2"
+
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/opentsdb"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus"

@@ -18,7 +20,6 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/native"
-	"github.com/urfave/cli/v2"
 )

 func main() {

@@ -161,6 +162,7 @@ func main() {
 					match:     c.String(vmNativeFilterMatch),
 					timeStart: c.String(vmNativeFilterTimeStart),
 					timeEnd:   c.String(vmNativeFilterTimeEnd),
+					chunk:     c.String(vmNativeStepInterval),
 				},
 				src: &vmNativeClient{
 					addr: strings.Trim(c.String(vmNativeSrcAddr), "/"),
app/vmctl/stepper/split.go: new file, 63 lines

package stepper

import (
	"fmt"
	"time"
)

const (
	// StepMonth represents a one month interval
	StepMonth string = "month"
	// StepDay represents a one day interval
	StepDay string = "day"
	// StepHour represents a one hour interval
	StepHour string = "hour"
)

// SplitDateRange splits start-end range in a subset of ranges respecting the given step
// Ranges with granularity of StepMonth are aligned to 1st of each month in order to improve export efficiency at block transfer level
func SplitDateRange(start, end time.Time, step string) ([][]time.Time, error) {
	if start.After(end) {
		return nil, fmt.Errorf("start time %q should come before end time %q", start.Format(time.RFC3339), end.Format(time.RFC3339))
	}

	var nextStep func(time.Time) (time.Time, time.Time)

	switch step {
	case StepMonth:
		nextStep = func(t time.Time) (time.Time, time.Time) {
			endOfMonth := time.Date(t.Year(), t.Month()+1, 1, 0, 0, 0, 0, t.Location()).Add(-1 * time.Nanosecond)
			if t == endOfMonth {
				endOfMonth = time.Date(t.Year(), t.Month()+2, 1, 0, 0, 0, 0, t.Location()).Add(-1 * time.Nanosecond)
				t = time.Date(t.Year(), t.Month()+1, 1, 0, 0, 0, 0, t.Location())
			}
			return t, endOfMonth
		}
	case StepDay:
		nextStep = func(t time.Time) (time.Time, time.Time) {
			return t, t.AddDate(0, 0, 1)
		}
	case StepHour:
		nextStep = func(t time.Time) (time.Time, time.Time) {
			return t, t.Add(time.Hour * 1)
		}
	default:
		return nil, fmt.Errorf("failed to parse step value, valid values are: '%s', '%s', '%s'. provided: '%s'", StepMonth, StepDay, StepHour, step)
	}

	currentStep := start

	ranges := make([][]time.Time, 0)

	for end.After(currentStep) {
		s, e := nextStep(currentStep)
		if e.After(end) {
			e = end
		}
		ranges = append(ranges, []time.Time{s, e})
		currentStep = e
	}

	return ranges, nil
}
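For illustration, a minimal standalone sketch of driving `SplitDateRange` from the new file above; the sample dates are arbitrary and this snippet is not part of the commit:

```go
package main

import (
	"fmt"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
)

func main() {
	start := time.Date(2022, 6, 17, 0, 7, 0, 0, time.UTC)
	end := time.Date(2022, 10, 3, 0, 7, 0, 0, time.UTC)

	// Month-aligned chunks, as used by `vmctl vm-native --vm-native-step-interval=month`.
	ranges, err := stepper.SplitDateRange(start, end, stepper.StepMonth)
	if err != nil {
		panic(err)
	}
	for i, r := range ranges {
		fmt.Printf("range %d/%d: %s - %s\n", i+1, len(ranges), r[0].Format(time.RFC3339), r[1].Format(time.RFC3339))
	}
}
```

With these inputs the helper yields five ranges (a partial June range, three whole months and a partial October range), which lines up with the five-range `vmctl` usage example shown earlier in this diff.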
app/vmctl/stepper/split_test.go: new file, 152 lines

package stepper

import (
	"reflect"
	"testing"
	"time"
)

type testTimeRange []string

func mustParseDatetime(t string) time.Time {
	result, err := time.Parse(time.RFC3339, t)
	if err != nil {
		panic(err)
	}
	return result
}

func Test_splitDateRange(t *testing.T) {
	type args struct {
		start       string
		end         string
		granularity string
	}
	tests := []struct {
		name    string
		args    args
		want    []testTimeRange
		wantErr bool
	}{
		{
			name: "validates start is before end",
			args: args{
				start:       "2022-02-01T00:00:00Z",
				end:         "2022-01-01T00:00:00Z",
				granularity: StepMonth,
			},
			want:    nil,
			wantErr: true,
		},
		{
			name: "validates granularity value",
			args: args{
				start:       "2022-01-01T00:00:00Z",
				end:         "2022-02-01T00:00:00Z",
				granularity: "non-existent-format",
			},
			want:    nil,
			wantErr: true,
		},
		{
			name: "month chunking",
			args: args{
				start:       "2022-01-03T11:11:11Z",
				end:         "2022-03-03T12:12:12Z",
				granularity: StepMonth,
			},
			want: []testTimeRange{
				{"2022-01-03T11:11:11Z", "2022-01-31T23:59:59.999999999Z"},
				{"2022-02-01T00:00:00Z", "2022-02-28T23:59:59.999999999Z"},
				{"2022-03-01T00:00:00Z", "2022-03-03T12:12:12Z"},
			},
			wantErr: false,
		},
		{
			name: "daily chunking",
			args: args{
				start:       "2022-01-03T11:11:11Z",
				end:         "2022-01-05T12:12:12Z",
				granularity: StepDay,
			},
			want: []testTimeRange{
				{"2022-01-03T11:11:11Z", "2022-01-04T11:11:11Z"},
				{"2022-01-04T11:11:11Z", "2022-01-05T11:11:11Z"},
				{"2022-01-05T11:11:11Z", "2022-01-05T12:12:12Z"},
			},
			wantErr: false,
		},
		{
			name: "hourly chunking",
			args: args{
				start:       "2022-01-03T11:11:11Z",
				end:         "2022-01-03T14:14:14Z",
				granularity: StepHour,
			},
			want: []testTimeRange{
				{"2022-01-03T11:11:11Z", "2022-01-03T12:11:11Z"},
				{"2022-01-03T12:11:11Z", "2022-01-03T13:11:11Z"},
				{"2022-01-03T13:11:11Z", "2022-01-03T14:11:11Z"},
				{"2022-01-03T14:11:11Z", "2022-01-03T14:14:14Z"},
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			start := mustParseDatetime(tt.args.start)
			end := mustParseDatetime(tt.args.end)

			got, err := SplitDateRange(start, end, tt.args.granularity)
			if (err != nil) != tt.wantErr {
				t.Errorf("splitDateRange() error = %v, wantErr %v", err, tt.wantErr)
				return
			}

			var testExpectedResults [][]time.Time
			if tt.want != nil {
				testExpectedResults = make([][]time.Time, 0)
				for _, dr := range tt.want {
					testExpectedResults = append(testExpectedResults, []time.Time{
						mustParseDatetime(dr[0]),
						mustParseDatetime(dr[1]),
					})
				}
			}

			if !reflect.DeepEqual(got, testExpectedResults) {
				t.Errorf("splitDateRange() got = %v, want %v", got, testExpectedResults)
			}
		})
	}
}
@@ -6,9 +6,12 @@ import (
 	"io"
 	"log"
 	"net/http"
+	"time"
+
+	"github.com/dmitryk-dk/pb/v3"

-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/limiter"
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
 )

@@ -31,6 +34,7 @@ type filter struct {
 	match     string
 	timeStart string
 	timeEnd   string
+	chunk     string
 }

 func (f filter) String() string {

@@ -52,10 +56,54 @@ const (
 )

 func (p *vmNativeProcessor) run(ctx context.Context) error {
+	if p.filter.chunk == "" {
+		return p.runSingle(ctx, p.filter)
+	}
+
+	startOfRange, err := time.Parse(time.RFC3339, p.filter.timeStart)
+	if err != nil {
+		return fmt.Errorf("failed to parse %s, provided: %s, expected format: %s, error: %v", vmNativeFilterTimeStart, p.filter.timeStart, time.RFC3339, err)
+	}
+
+	var endOfRange time.Time
+	if p.filter.timeEnd != "" {
+		endOfRange, err = time.Parse(time.RFC3339, p.filter.timeEnd)
+		if err != nil {
+			return fmt.Errorf("failed to parse %s, provided: %s, expected format: %s, error: %v", vmNativeFilterTimeEnd, p.filter.timeEnd, time.RFC3339, err)
+		}
+	} else {
+		endOfRange = time.Now()
+	}
+
+	ranges, err := stepper.SplitDateRange(startOfRange, endOfRange, p.filter.chunk)
+	if err != nil {
+		return fmt.Errorf("failed to create date ranges for the given time filters: %v", err)
+	}
+
+	for rangeIdx, r := range ranges {
+		formattedStartTime := r[0].Format(time.RFC3339)
+		formattedEndTime := r[1].Format(time.RFC3339)
+		log.Printf("Processing range %d/%d: %s - %s \n", rangeIdx+1, len(ranges), formattedStartTime, formattedEndTime)
+		f := filter{
+			match:     p.filter.match,
+			timeStart: formattedStartTime,
+			timeEnd:   formattedEndTime,
+		}
+		err := p.runSingle(ctx, f)
+
+		if err != nil {
+			log.Printf("processing failed for range %d/%d: %s - %s \n", rangeIdx+1, len(ranges), formattedStartTime, formattedEndTime)
+			return err
+		}
+	}
+	return nil
+}
+
+func (p *vmNativeProcessor) runSingle(ctx context.Context, f filter) error {
 	pr, pw := io.Pipe()

-	fmt.Printf("Initing export pipe from %q with filters: %s\n", p.src.addr, p.filter)
+	log.Printf("Initing export pipe from %q with filters: %s\n", p.src.addr, f)
-	exportReader, err := p.exportPipe(ctx)
+	exportReader, err := p.exportPipe(ctx, f)
 	if err != nil {
 		return fmt.Errorf("failed to init export pipe: %s", err)
 	}

@@ -83,13 +131,20 @@ func (p *vmNativeProcessor) run(ctx context.Context) error {
 	}()

 	fmt.Printf("Initing import process to %q:\n", p.dst.addr)
-	bar := barpool.AddWithTemplate(nativeBarTpl, 0)
+	pool := pb.NewPool()
+	bar := pb.ProgressBarTemplate(nativeBarTpl).New(0)
+	pool.Add(bar)
 	barReader := bar.NewProxyReader(exportReader)
-	if err := barpool.Start(); err != nil {
+	if err := pool.Start(); err != nil {
 		log.Printf("error start process bars pool: %s", err)
 		return err
 	}
-	defer barpool.Stop()
+	defer func() {
+		bar.Finish()
+		if err := pool.Stop(); err != nil {
+			fmt.Printf("failed to stop barpool: %+v\n", err)
+		}
+	}()

 	w := io.Writer(pw)
 	if p.rateLimit > 0 {

@@ -111,7 +166,7 @@ func (p *vmNativeProcessor) run(ctx context.Context) error {
 	return nil
 }

-func (p *vmNativeProcessor) exportPipe(ctx context.Context) (io.ReadCloser, error) {
+func (p *vmNativeProcessor) exportPipe(ctx context.Context, f filter) (io.ReadCloser, error) {
 	u := fmt.Sprintf("%s/%s", p.src.addr, nativeExportAddr)
 	req, err := http.NewRequestWithContext(ctx, "GET", u, nil)
 	if err != nil {

@@ -119,12 +174,12 @@ func (p *vmNativeProcessor) exportPipe(ctx context.Context) (io.ReadCloser, erro
 	}

 	params := req.URL.Query()
-	params.Set("match[]", p.filter.match)
-	if p.filter.timeStart != "" {
-		params.Set("start", p.filter.timeStart)
+	params.Set("match[]", f.match)
+	if f.timeStart != "" {
+		params.Set("start", f.timeStart)
 	}
-	if p.filter.timeEnd != "" {
-		params.Set("end", p.filter.timeEnd)
+	if f.timeEnd != "" {
+		params.Set("end", f.timeEnd)
 	}
 	req.URL.RawQuery = params.Encode()
@@ -4,6 +4,8 @@ import (
 	"context"
 	"testing"
 	"time"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
 )

 // If you want to run this test:

@@ -16,6 +18,7 @@ import (
 const (
 	matchFilter     = `{job="avalanche"}`
 	timeStartFilter = "2020-01-01T20:07:00Z"
+	timeEndFilter   = "2020-08-01T20:07:00Z"
 	srcAddr         = "http://127.0.0.1:8428"
 	dstAddr         = "http://127.0.0.1:8528"
 )

@@ -74,6 +77,26 @@ func Test_vmNativeProcessor_run(t *testing.T) {
 			closer:  func(cancelFunc context.CancelFunc) {},
 			wantErr: false,
 		},
+		{
+			name: "simulate correct work with chunking",
+			fields: fields{
+				filter: filter{
+					match:     matchFilter,
+					timeStart: timeStartFilter,
+					timeEnd:   timeEndFilter,
+					chunk:     stepper.StepMonth,
+				},
+				rateLimit: 0,
+				dst: &vmNativeClient{
+					addr: dstAddr,
+				},
+				src: &vmNativeClient{
+					addr: srcAddr,
+				},
+			},
+			closer:  func(cancelFunc context.CancelFunc) {},
+			wantErr: false,
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -450,7 +450,7 @@ var deleteDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/
 func LabelValuesHandler(qt *querytracer.Tracer, startTime time.Time, labelName string, w http.ResponseWriter, r *http.Request) error {
 	defer labelValuesDuration.UpdateDuration(startTime)

-	cp, err := getCommonParams(r, startTime, false)
+	cp, err := getCommonParamsWithDefaultDuration(r, startTime, false)
 	if err != nil {
 		return err
 	}

@@ -547,7 +547,7 @@ var tsdbStatusDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/
 func LabelsHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWriter, r *http.Request) error {
 	defer labelsDuration.UpdateDuration(startTime)

-	cp, err := getCommonParams(r, startTime, false)
+	cp, err := getCommonParamsWithDefaultDuration(r, startTime, false)
 	if err != nil {
 		return err
 	}

@@ -600,17 +600,14 @@ var seriesCountDuration = metrics.NewSummary(`vm_request_duration_seconds{path="
 func SeriesHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWriter, r *http.Request) error {
 	defer seriesDuration.UpdateDuration(startTime)

-	cp, err := getCommonParams(r, startTime, true)
-	if err != nil {
-		return err
-	}
 	// Do not set start to searchutils.minTimeMsecs by default as Prometheus does,
 	// since this leads to fetching and scanning all the data from the storage,
 	// which can take a lot of time for big storages.
 	// It is better setting start as end-defaultStep by default.
 	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/91
-	if cp.start == 0 {
-		cp.start = cp.end - defaultStep
+	cp, err := getCommonParamsWithDefaultDuration(r, startTime, true)
+	if err != nil {
+		return err
 	}
 	limit, err := searchutils.GetInt(r, "limit")
 	if err != nil {

@@ -828,7 +825,7 @@ func queryRangeHandler(qt *querytracer.Tracer, startTime time.Time, w http.Respo
 		end = start + defaultStep
 	}
 	if err := promql.ValidateMaxPointsPerSeries(start, end, step, *maxPointsPerTimeseries); err != nil {
-		return err
+		return fmt.Errorf("%w; (see -search.maxPointsPerTimeseries command-line flag)", err)
 	}
 	if mayCache {
 		start, end = promql.AdjustStartEnd(start, end, step)

@@ -1062,6 +1059,17 @@ func getExportParams(r *http.Request, startTime time.Time) (*commonParams, error
 	return cp, nil
 }

+func getCommonParamsWithDefaultDuration(r *http.Request, startTime time.Time, requireNonEmptyMatch bool) (*commonParams, error) {
+	cp, err := getCommonParams(r, startTime, requireNonEmptyMatch)
+	if err != nil {
+		return nil, err
+	}
+	if cp.start == 0 {
+		cp.start = cp.end - defaultStep
+	}
+	return cp, nil
+}
+
 // getCommonParams obtains common params from r, which are used in /api/v1/* handlers:
 //
 //   - timeout
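The `getCommonParamsWithDefaultDuration` helper added above is what backs the new default time range for `/api/v1/series`, `/api/v1/labels` and `/api/v1/label/.../values`. A simplified sketch of the default it applies follows; the 5-minute value comes from the README and CHANGELOG entries in this diff, and the millisecond unit is an assumption, so treat this as illustrative rather than a copy of the handler code:

```go
package main

import "fmt"

// defaultStepMs mirrors the documented default lookback of 5 minutes.
const defaultStepMs int64 = 5 * 60 * 1000

// applyDefaultStart keeps an explicitly provided start and otherwise
// derives it from the end of the requested range minus the default step.
func applyDefaultStart(startMs, endMs int64) int64 {
	if startMs == 0 {
		return endMs - defaultStepMs
	}
	return startMs
}

func main() {
	end := int64(1662595200000) // some request end timestamp in milliseconds

	fmt.Println(applyDefaultStart(0, end))             // defaulted: end minus 5 minutes
	fmt.Println(applyDefaultStart(1662595000000, end)) // explicit start kept as-is
}
```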
@@ -42,7 +42,7 @@ func ValidateMaxPointsPerSeries(start, end, step int64, maxPoints int) error {
 	}
 	points := (end-start)/step + 1
 	if points > int64(maxPoints) {
-		return fmt.Errorf("too many points for the given start=%d, end=%d and step=%d: %d; the maximum number of points is %d (see -search.maxPoints* command-line flags)",
+		return fmt.Errorf("too many points for the given start=%d, end=%d and step=%d: %d; the maximum number of points is %d",
 			start, end, step, points, maxPoints)
 	}
 	return nil

@@ -850,7 +850,7 @@ func evalRollupFuncWithSubquery(qt *querytracer.Tracer, ec *EvalConfig, funcName
 	ecSQ.Step = step
 	ecSQ.MaxPointsPerSeries = *maxPointsSubqueryPerTimeseries
 	if err := ValidateMaxPointsPerSeries(ecSQ.Start, ecSQ.End, ecSQ.Step, ecSQ.MaxPointsPerSeries); err != nil {
-		return nil, err
+		return nil, fmt.Errorf("%w; (see -search.maxPointsSubqueryPerTimeseries command-line flag)", err)
 	}
 	// unconditionally align start and end args to step for subquery as Prometheus does.
 	ecSQ.Start, ecSQ.End = alignStartEnd(ecSQ.Start, ecSQ.End, ecSQ.Step)
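The generic flag hint is dropped from `ValidateMaxPointsPerSeries` itself and re-added at each call site via `%w` wrapping, so every caller can name the exact command-line flag to tune while the underlying error stays inspectable. A small self-contained sketch of that `%w` behaviour; it uses a hypothetical sentinel error purely for demonstration, whereas the real function builds its error with `fmt.Errorf` directly:

```go
package main

import (
	"errors"
	"fmt"
)

// errTooManyPoints is a stand-in sentinel for the "too many points" condition.
var errTooManyPoints = errors.New("too many points")

func validate() error {
	return errTooManyPoints
}

func main() {
	// Wrap with %w, appending the flag hint relevant at this call site.
	err := fmt.Errorf("%w; (see -search.maxPointsPerTimeseries command-line flag)", validate())

	fmt.Println(err)                              // message now includes the flag hint
	fmt.Println(errors.Is(err, errTooManyPoints)) // true: the original error is preserved
}
```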
@@ -1349,15 +1349,11 @@ func rollupRateOverSum(rfa *rollupFuncArg) float64 {
 		// Assume that the value didn't change since rfa.prevValue.
 		return 0
 	}
-	dt := rfa.window
-	if !math.IsNaN(rfa.prevValue) {
-		dt = timestamps[len(timestamps)-1] - rfa.prevTimestamp
-	}
 	sum := float64(0)
 	for _, v := range rfa.values {
 		sum += v
 	}
-	return sum / (float64(dt) / 1e3)
+	return sum / (float64(rfa.window) / 1e3)
 }

 func rollupRange(rfa *rollupFuncArg) float64 {
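The CHANGELOG entry later in this diff describes the fix above as computing `rate_over_sum(m[d])` as `sum_over_time(m[d])/d`. Reduced to its essentials, the corrected calculation behaves as in the following sketch (hypothetical helper name and simplified signature, not the full rollup machinery):

```go
package main

import "fmt"

// rateOverSum sums the raw samples on the lookbehind window and always divides
// by the window duration d (given here in milliseconds, as rfa.window is),
// instead of by the possibly shorter distance to the previous sample.
func rateOverSum(values []float64, windowMillis int64) float64 {
	sum := 0.0
	for _, v := range values {
		sum += v
	}
	return sum / (float64(windowMillis) / 1e3)
}

func main() {
	// Three samples on a 40-second window: (10+20+30)/40 = 1.5 per second.
	fmt.Println(rateOverSum([]float64{10, 20, 30}, 40_000))
}
```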
@@ -1307,7 +1307,7 @@ func TestRollupFuncsNoWindow(t *testing.T) {
 		if samplesScanned == 0 {
 			t.Fatalf("expecting non-zero samplesScanned from rollupConfig.Do")
 		}
-		valuesExpected := []float64{nan, 2775, 5262.5, 3678.5714285714284, 2880}
+		valuesExpected := []float64{nan, 2775, 5262.5, 3862.5, 1800}
 		timestampsExpected := []int64{0, 40, 80, 120, 160}
 		testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
 	})
@@ -1,4 +1,4 @@
-FROM golang:1.19.0 as build-web-stage
+FROM golang:1.19.1 as build-web-stage
 COPY build /build

 WORKDIR /build
dashboards/victoriametrics-cluster.json: new file, 7483 lines (diff not shown because it is too large).
@@ -4,7 +4,7 @@ DOCKER_NAMESPACE := victoriametrics

 ROOT_IMAGE ?= alpine:3.16.2
 CERTS_IMAGE := alpine:3.16.2
-GO_BUILDER_IMAGE := golang:1.19.0-alpine
+GO_BUILDER_IMAGE := golang:1.19.1-alpine
 BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1
 BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)
@@ -9,6 +9,7 @@ See also [case studies](https://docs.victoriametrics.com/CaseStudies.html).
 ## Third-party articles and slides about VictoriaMetrics

 * [The (Almost) Infinitely Scalable Open Source Monitoring Dream](https://www.forbes.com/sites/adrianbridgwater/2022/08/16/the-almost-infinitely-scalable-open-source-monitoring-dream/)
+* [Monitoring at scale with Victoria Metrics](https://tech.bedrockstreaming.com/2022/09/06/monitoring-at-scale-with-victoriametrics.html)
 * [Optimizing Linkerd metrics in Prometheus](https://aatarasoff.medium.com/optimizing-linkerd-metrics-in-prometheus-de607ec10f6b)
 * [Optimizing the Storage of Large Volumes of Metrics for a Long Time in VictoriaMetrics](https://percona.community/blog/2022/06/02/long-time-keeping-metrics-victoriametrics/)
 * [How do We Keep Metrics for a Long Time in VictoriaMetrics](https://www.youtube.com/watch?v=SGZjY7xgDwE)
|
||||||
|
|
||||||
## tip
|
## tip
|
||||||
|
|
||||||
|
**Update note:** this release changes data format for [/api/v1/export/native](https://docs.victoriametrics.com/#how-to-export-data-in-native-format) in incompatible way, so it cannot be imported into older version of VictoriaMetrics via [/api/v1/import/native](https://docs.victoriametrics.com/#how-to-import-data-in-native-format).
|
||||||
|
|
||||||
|
|
||||||
|
* FEATURE: check the correctess of raw sample timestamps stored on disk when reading them. This reduces the probability of possible silent corruption of the data stored on disk. This should help [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2998) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3011).
|
||||||
|
* FEATURE: set the `start` arg to `end - 5 minutes` if isn't passed explicitly to [/api/v1/labels](https://docs.victoriametrics.com/url-examples.html#apiv1labels) and [/api/v1/label/.../values](https://docs.victoriametrics.com/url-examples.html#apiv1labelvalues). See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3052).
|
||||||
|
* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add `vm-native-step-interval` command line flag for `vm-native` mode. New option allows splitting the import process into chunks by time interval. This helps migrating data sets with high churn rate and provides better control over the process. See [feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2733).
|
||||||
|
|
||||||
|
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly calculate `rate_over_sum(m[d])` as `sum_over_time(m[d])/d`. Previously the `sum_over_time(m[d])` could be improperly divided by smaller than `d` time range. See [rate_over_sum() docs](https://docs.victoriametrics.com/MetricsQL.html#rate_over_sum) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3045).
|
||||||
|
* BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): properly calculate query results at `vmselect`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3067). The issue has been introduced in [v1.81.0](https://docs.victoriametrics.com/CHANGELOG.html#v1810).
|
||||||
|
|
||||||
|
## [v1.81.2](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.81.2)
|
||||||
|
|
||||||
|
Released at 08-09-2022
|
||||||
|
|
||||||
|
* BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): properly calculate query results at `vmselect`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3067). The issue has been introduced in [v1.81.0](https://docs.victoriametrics.com/CHANGELOG.html#v1810).
|
||||||
|
|
||||||
## [v1.81.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.81.1)
|
## [v1.81.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.81.1)
|
||||||
|
|
||||||
Released at 02-09-2022
|
Released at 02-09-2022
|
||||||
|
|
||||||
|
**It isn't recommended to use VictoriaMetrics cluster v1.81.1 because of [the bug](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3067), which may result in incorrect query results. Upgrade to [v1.81.2](https://docs.victoriametrics.com/CHANGELOG.html#v1812) instead.**
|
||||||
|
|
||||||
* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): evaluate `q1`, ..., `qN` in parallel when calculating `union(q1, .., qN)`. Previously [union](https://docs.victoriametrics.com/MetricsQL.html#union) args were evaluated sequentially. This could result in lower than expected performance.
|
* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): evaluate `q1`, ..., `qN` in parallel when calculating `union(q1, .., qN)`. Previously [union](https://docs.victoriametrics.com/MetricsQL.html#union) args were evaluated sequentially. This could result in lower than expected performance.
|
||||||
|
|
||||||
* BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): fix potential panic at `vmselect` under high load, which has been introduced in [v1.81.0](https://docs.victoriametrics.com/CHANGELOG.html#v1810). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3058).
|
* BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): fix potential panic at `vmselect` under high load, which has been introduced in [v1.81.0](https://docs.victoriametrics.com/CHANGELOG.html#v1810). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3058).
|
||||||
|
@ -27,7 +44,7 @@ Released at 02-09-2022
|
||||||
|
|
||||||
## [v1.81.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.81.0)
|
## [v1.81.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.81.0)
|
||||||
|
|
||||||
**It isn't recommended to use VictoriaMetrics cluster v1.81.0 because of [the bug](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3058), which may result in `vmselect` crashes under high load. Upgrade to [v1.81.1](https://docs.victoriametrics.com/CHANGELOG.html#v1811) instead.**
|
**It isn't recommended to use VictoriaMetrics cluster v1.81.0 because of [the bug](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3058), which may result in `vmselect` crashes under high load. Upgrade to [v1.81.2](https://docs.victoriametrics.com/CHANGELOG.html#v1812) instead.**
|
||||||
|
|
||||||
Released at 31-08-2022
|
Released at 31-08-2022
|
||||||
|
|
||||||
|
|
|
@ -46,7 +46,7 @@ scrape_configs:
|
||||||
## Visualization
|
## Visualization
|
||||||
|
|
||||||
Visualisation of statistics can be done in Grafana using the following
|
Visualisation of statistics can be done in Grafana using the following
|
||||||
[dashboard](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster/dashboards/clusterbytenant.json).
|
[dashboard](https://grafana.com/grafana/dashboards/16399-victoriametrics-cluster-per-tenant-statistic/).
|
||||||
|
|
||||||
## Integration with vmgateway
|
## Integration with vmgateway
|
||||||
|
|
||||||
|
|
|
@ -290,6 +290,9 @@ VictoriaMetrics provides an ability to explore time series cardinality at `cardi
|
||||||
- To identify values with the highest number of series for the selected label (aka `focusLabel`).
|
- To identify values with the highest number of series for the selected label (aka `focusLabel`).
|
||||||
- To identify label=name pairs with the highest number of series.
|
- To identify label=name pairs with the highest number of series.
|
||||||
- To identify labels with the highest number of unique values.
|
- To identify labels with the highest number of unique values.
|
||||||
|
Note that [cluster version of VictoriaMetrics](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html)
|
||||||
|
may show lower than expected number of unique label values for labels with small number of unique values.
|
||||||
|
This is because of [implementation limits](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/5a6e617b5e41c9170e7c562aecd15ee0c901d489/app/vmselect/netstorage/netstorage.go#L1039-L1045).
|
||||||
|
|
||||||
By default cardinality explorer analyzes time series for the current date. It provides the ability to select different day at the top right corner.
|
By default cardinality explorer analyzes time series for the current date. It provides the ability to select different day at the top right corner.
|
||||||
By default all the time series for the selected date are analyzed. It is possible to narrow down the analysis to series
|
By default all the time series for the selected date are analyzed. It is possible to narrow down the analysis to series
|
||||||
|
@ -326,11 +329,19 @@ See also [vmagent](https://docs.victoriametrics.com/vmagent.html), which can be
|
||||||
|
|
||||||
VictoriaMetrics accepts data from [DataDog agent](https://docs.datadoghq.com/agent/) or [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/) via ["submit metrics" API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics) at `/datadog/api/v1/series` path.
|
VictoriaMetrics accepts data from [DataDog agent](https://docs.datadoghq.com/agent/) or [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/) via ["submit metrics" API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics) at `/datadog/api/v1/series` path.
|
||||||
|
|
||||||
Run DataDog agent with `DD_DD_URL=http://victoriametrics-host:8428/datadog` environment variable in order to write data to VictoriaMetrics at `victoriametrics-host` host. Another option is to set `dd_url` param at [DataDog agent configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files/) to `http://victoriametrics-host:8428/datadog`.
|
Single-node VictoriaMetrics:
|
||||||
|
|
||||||
|
Run DataDog agent with environment variable `DD_DD_URL=http://victoriametrics-host:8428/datadog`. Alternatively, set `dd_url` param at [DataDog agent configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files/) to `http://victoriametrics-host:8428/datadog`.
|
||||||
|
|
||||||
|
Cluster version of VictoriaMetrics:
|
||||||
|
|
||||||
|
Run DataDog agent with environment variable `DD_DD_URL=http://vminsert-host:8480/insert/0/datadog`. Alternatively, set `dd_url` param at [DataDog agent configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files/) to `http://vminsert-host:8480/insert/0/datadog`.
|
||||||
|
|
||||||
VictoriaMetrics doesn't check `DD_API_KEY` param, so it can be set to an arbitrary value.
|
VictoriaMetrics doesn't check `DD_API_KEY` param, so it can be set to an arbitrary value.
|
||||||
|
|
||||||
Example on how to send data to VictoriaMetrics via DataDog "submit metrics" API from command line:
|
Example of how to send data to VictoriaMetrics via [DataDog "submit metrics"](https://docs.victoriametrics.com/url-examples.html#datadogapiv1series) from command line:
|
||||||
|
|
||||||
|
Single-node VictoriaMetrics:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
echo '
|
echo '
|
||||||
|
@ -351,15 +362,56 @@ echo '
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
' | curl -X POST --data-binary @- http://localhost:8428/datadog/api/v1/series
|
' | curl -X POST --data-binary @- http://victoriametrics-host:8428/datadog/api/v1/series
|
||||||
```
|
```
|
||||||
|
|
||||||
The imported data can be read via [export API](https://docs.victoriametrics.com/#how-to-export-data-in-json-line-format):
|
Cluster version of VictoriaMetrics:
|
||||||
|
|
||||||
<div class="with-copy" markdown="1">
|
<div class="with-copy" markdown="1">
|
||||||
|
|
||||||
```console
|
```console
|
||||||
curl http://localhost:8428/api/v1/export -d 'match[]=system.load.1'
|
echo '
|
||||||
|
{
|
||||||
|
"series": [
|
||||||
|
{
|
||||||
|
"host": "test.example.com",
|
||||||
|
"interval": 20,
|
||||||
|
"metric": "system.load.1",
|
||||||
|
"points": [[
|
||||||
|
0,
|
||||||
|
0.5
|
||||||
|
]],
|
||||||
|
"tags": [
|
||||||
|
"environment:test"
|
||||||
|
],
|
||||||
|
"type": "rate"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
' | curl -X POST --data-binary @- http://vminsert-host:8480/insert/0/datadog/api/v1/series
|
||||||
|
```
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
The imported data can be read via [export API](https://docs.victoriametrics.com/url-examples.html#apiv1export):
|
||||||
|
|
||||||
|
Single-node VictoriaMetrics:
|
||||||
|
|
||||||
|
<div class="with-copy" markdown="1">
|
||||||
|
|
||||||
|
```console
|
||||||
|
curl http://victoriametrics-host:8428/api/v1/export -d 'match[]=system.load.1'
|
||||||
|
```
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
Cluster version of VictoriaMetrics:
|
||||||
|
|
||||||
|
<div class="with-copy" markdown="1">
|
||||||
|
|
||||||
|
```console
|
||||||
|
curl http://vmselect-host:8481/select/0/prometheus/api/v1/export -d 'match[]=system.load.1'
|
||||||
```
|
```
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
@ -638,7 +690,7 @@ VictoriaMetrics accepts `round_digits` query arg for `/api/v1/query` and `/api/v
|
||||||
|
|
||||||
VictoriaMetrics accepts `limit` query arg for `/api/v1/labels` and `/api/v1/label/<labelName>/values` handlers for limiting the number of returned entries. For example, the query to `/api/v1/labels?limit=5` returns a sample of up to 5 unique labels, while ignoring the rest of labels. If the provided `limit` value exceeds the corresponding `-search.maxTagKeys` / `-search.maxTagValues` command-line flag values, then limits specified in the command-line flags are used.
|
VictoriaMetrics accepts `limit` query arg for `/api/v1/labels` and `/api/v1/label/<labelName>/values` handlers for limiting the number of returned entries. For example, the query to `/api/v1/labels?limit=5` returns a sample of up to 5 unique labels, while ignoring the rest of labels. If the provided `limit` value exceeds the corresponding `-search.maxTagKeys` / `-search.maxTagValues` command-line flag values, then limits specified in the command-line flags are used.
|
||||||
|
|
||||||
By default, VictoriaMetrics returns time series for the last 5 minutes from `/api/v1/series`, while the Prometheus API defaults to all time. Use `start` and `end` to select a different time range.
|
By default, VictoriaMetrics returns time series for the last 5 minutes from `/api/v1/series`, `/api/v1/labels` and `/api/v1/label/<labelName>/values`, while the Prometheus API defaults to all time. Explicitly set `start` and `end` to select the desired time range.
|
||||||
VictoriaMetrics accepts `limit` query arg for `/api/v1/series` handlers for limiting the number of returned entries. For example, the query to `/api/v1/series?limit=5` returns a sample of up to 5 series, while ignoring the rest. If the provided `limit` value exceeds the corresponding `-search.maxSeries` command-line flag values, then limits specified in the command-line flags are used.
|
VictoriaMetrics accepts `limit` query arg for `/api/v1/series` handlers for limiting the number of returned entries. For example, the query to `/api/v1/series?limit=5` returns a sample of up to 5 series, while ignoring the rest. If the provided `limit` value exceeds the corresponding `-search.maxSeries` command-line flag values, then limits specified in the command-line flags are used.
|
||||||
|
|
||||||
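For illustration, a hedged sketch of combining these query args (the host, the metric name and the relative `start` value are assumptions):

```console
# Return at most 5 label names / series, looking back 1 hour instead of the default 5 minutes
curl http://victoriametrics-host:8428/api/v1/labels -d 'limit=5' -d 'start=-1h'
curl http://victoriametrics-host:8428/api/v1/series -d 'limit=5' -d 'match[]=system.load.1' -d 'start=-1h'
```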
Additionally, VictoriaMetrics provides the following handlers:
|
Additionally, VictoriaMetrics provides the following handlers:
|
||||||
|
@ -1573,7 +1625,8 @@ The exceeded limits can be [monitored](#monitoring) with the following metrics:
|
||||||
|
|
||||||
These limits are approximate, so VictoriaMetrics can underflow/overflow the limit by a small percentage (usually less than 1%).
|
These limits are approximate, so VictoriaMetrics can underflow/overflow the limit by a small percentage (usually less than 1%).
|
||||||
|
|
||||||
See also more advanced [cardinality limiter in vmagent](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter).
|
See also more advanced [cardinality limiter in vmagent](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter)
|
||||||
|
and [cardinality explorer docs](#cardinality-explorer).
|
||||||
|
|
||||||
## Troubleshooting
|
## Troubleshooting
|
||||||
|
|
||||||
|
|
|
@ -35,24 +35,27 @@ git remote add enterprise <url>
|
||||||
* linux/ppc64le
|
* linux/ppc64le
|
||||||
* linux/386
|
* linux/386
|
||||||
This step can be run manually with the command `make publish` from the needed git tag.
|
This step can be run manually with the command `make publish` from the needed git tag.
|
||||||
c) Create draft GitHub release with the name `TAG`. This step can be run manually
|
4. Push the `v1.xx.y` and `v1.xx.y-cluster` tags created at step 2 to the public GitHub repository at https://github.com/VictoriaMetrics/VictoriaMetrics (see the sketch below).
|
||||||
|
**Important note:** do not push enterprise tags to the public GitHub repository - they must be pushed only to the private repository.
|
||||||
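A minimal sketch for step 4, assuming the public repository is configured as the `origin` remote and using placeholder tag names:

```console
# Push only the public tags; enterprise tags stay in the private repository
git push origin v1.xx.y v1.xx.y-cluster
```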
|
5. Run `TAG=v1.xx.yy make github-create-release github-upload-assets`. This command performs the following tasks:
|
||||||
|
a) Create draft GitHub release with the name `TAG`. This step can be run manually
|
||||||
with the command `TAG=v1.xx.y make github-create-release`.
|
with the command `TAG=v1.xx.y make github-create-release`.
|
||||||
The release id is stored at `/tmp/vm-github-release` file.
|
The release id is stored at `/tmp/vm-github-release` file.
|
||||||
d) Upload all the binaries and checksums created at step `a` to that release.
|
b) Upload all the binaries and checksums created at step `3a` to that release.
|
||||||
This step can be run manually with the command `make github-upload-assets`.
|
This step can be run manually with the command `make github-upload-assets`.
|
||||||
It is expected that the needed release id is stored at `/tmp/vm-github-release` file,
|
It is expected that the needed release id is stored at `/tmp/vm-github-release` file,
|
||||||
which must be created at the step `c`.
|
which must be created at the step `a`.
|
||||||
If the upload process is interrupted for any reason, then the following recovery steps must be performed:
|
If the upload process is interrupted for any reason, then the following recovery steps must be performed:
|
||||||
- To delete the created draft release by running the command `make github-delete-release`.
|
- To delete the created draft release by running the command `make github-delete-release`.
|
||||||
This command expects that the id of the release to delete is located at `/tmp/vm-github-release`
|
This command expects that the id of the release to delete is located at `/tmp/vm-github-release`
|
||||||
file created at the step `c`.
|
file created at the step `a`.
|
||||||
- To run the command `TAG=v1.xx.y make github-create-release github-upload-assets`, so new release is created
|
- To run the command `TAG=v1.xx.y make github-create-release github-upload-assets`, so new release is created
|
||||||
and all the needed assets are re-uploaded to it.
|
and all the needed assets are re-uploaded to it.
|
||||||
5. Go to <https://github.com/VictoriaMetrics/VictoriaMetrics/releases> and verify that draft release with the name `TAG` has been created
|
6. Go to <https://github.com/VictoriaMetrics/VictoriaMetrics/releases> and verify that draft release with the name `TAG` has been created
|
||||||
and this release contains all the needed binaries and checksums.
|
and this release contains all the needed binaries and checksums.
|
||||||
6. Update the release description with the [CHANGELOG](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md) for this release.
|
7. Update the release description with the [CHANGELOG](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md) for this release.
|
||||||
7. Remove the `draft` checkbox for the `TAG` release and manually publish it.
|
8. Remove the `draft` checkbox for the `TAG` release and manually publish it.
|
||||||
8. Bump version of the VictoriaMetrics cluster in the [sandbox environment](https://github.com/VictoriaMetrics/ops/blob/main/sandbox/manifests/benchmark-vm/vmcluster.yaml)
|
9. Bump version of the VictoriaMetrics cluster in the [sandbox environment](https://github.com/VictoriaMetrics/ops/blob/main/sandbox/manifests/benchmark-vm/vmcluster.yaml)
|
||||||
by [opening and merging PR](https://github.com/VictoriaMetrics/ops/pull/58).
|
by [opening and merging PR](https://github.com/VictoriaMetrics/ops/pull/58).
|
||||||
|
|
||||||
## Building snap package
|
## Building snap package
|
||||||
|
@ -109,7 +112,3 @@ Repository [https://github.com/VictoriaMetrics/ansible-playbooks](https://github
|
||||||
5. Commit changes
|
5. Commit changes
|
||||||
6. Create a new tag
|
6. Create a new tag
|
||||||
7. Create a new release. This automatically publishes the new versions to galaxy.ansible.com
|
7. Create a new release. This automatically publishes the new versions to galaxy.ansible.com
|
||||||
|
|
||||||
## Github pages
|
|
||||||
|
|
||||||
All changes in `README.md`, `docs` folder and `.md` extension automatically push to Wiki
|
|
||||||
|
|
|
@ -294,6 +294,9 @@ VictoriaMetrics provides an ability to explore time series cardinality at `cardi
|
||||||
- To identify values with the highest number of series for the selected label (aka `focusLabel`).
|
- To identify values with the highest number of series for the selected label (aka `focusLabel`).
|
||||||
- To identify label=name pairs with the highest number of series.
|
- To identify label=name pairs with the highest number of series.
|
||||||
- To identify labels with the highest number of unique values.
|
- To identify labels with the highest number of unique values.
|
||||||
|
Note that [cluster version of VictoriaMetrics](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html)
|
||||||
|
may show a lower than expected number of unique label values for labels with a small number of unique values.
|
||||||
|
This is because of [implementation limits](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/5a6e617b5e41c9170e7c562aecd15ee0c901d489/app/vmselect/netstorage/netstorage.go#L1039-L1045).
|
||||||
|
|
||||||
By default, the cardinality explorer analyzes time series for the current date. It provides the ability to select a different day at the top right corner.
|
By default, the cardinality explorer analyzes time series for the current date. It provides the ability to select a different day at the top right corner.
|
||||||
By default all the time series for the selected date are analyzed. It is possible to narrow down the analysis to series
|
By default all the time series for the selected date are analyzed. It is possible to narrow down the analysis to series
|
||||||
|
@ -330,11 +333,19 @@ See also [vmagent](https://docs.victoriametrics.com/vmagent.html), which can be
|
||||||
|
|
||||||
VictoriaMetrics accepts data from [DataDog agent](https://docs.datadoghq.com/agent/) or [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/) via ["submit metrics" API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics) at `/datadog/api/v1/series` path.
|
VictoriaMetrics accepts data from [DataDog agent](https://docs.datadoghq.com/agent/) or [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/) via ["submit metrics" API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics) at `/datadog/api/v1/series` path.
|
||||||
|
|
||||||
Run DataDog agent with `DD_DD_URL=http://victoriametrics-host:8428/datadog` environment variable in order to write data to VictoriaMetrics at `victoriametrics-host` host. Another option is to set `dd_url` param at [DataDog agent configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files/) to `http://victoriametrics-host:8428/datadog`.
|
Single-node VictoriaMetrics:
|
||||||
|
|
||||||
|
Run DataDog agent with environment variable `DD_DD_URL=http://victoriametrics-host:8428/datadog`. Alternatively, set `dd_url` param at [DataDog agent configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files/) to `http://victoriametrics-host:8428/datadog`.
|
||||||
|
|
||||||
|
Cluster version of VictoriaMetrics:
|
||||||
|
|
||||||
|
Run DataDog agent with environment variable `DD_DD_URL=http://vminsert-host:8480/insert/0/datadog`. Alternatively, set `dd_url` param at [DataDog agent configuration file](https://docs.datadoghq.com/agent/guide/agent-configuration-files/) to `http://vminsert-host:8480/insert/0/datadog`.
|
||||||
|
|
||||||
VictoriaMetrics doesn't check `DD_API_KEY` param, so it can be set to an arbitrary value.
|
VictoriaMetrics doesn't check `DD_API_KEY` param, so it can be set to an arbitrary value.
|
||||||
|
|
||||||
Example on how to send data to VictoriaMetrics via DataDog "submit metrics" API from command line:
|
Example of how to send data to VictoriaMetrics via [DataDog "submit metrics"](https://docs.victoriametrics.com/url-examples.html#datadogapiv1series) from command line:
|
||||||
|
|
||||||
|
Single-node VictoriaMetrics:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
echo '
|
echo '
|
||||||
|
@ -355,15 +366,56 @@ echo '
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
' | curl -X POST --data-binary @- http://localhost:8428/datadog/api/v1/series
|
' | curl -X POST --data-binary @- http://victoriametrics-host:8428/datadog/api/v1/series
|
||||||
```
|
```
|
||||||
|
|
||||||
The imported data can be read via [export API](https://docs.victoriametrics.com/#how-to-export-data-in-json-line-format):
|
Cluster version of VictoriaMetrics:
|
||||||
|
|
||||||
<div class="with-copy" markdown="1">
|
<div class="with-copy" markdown="1">
|
||||||
|
|
||||||
```console
|
```console
|
||||||
curl http://localhost:8428/api/v1/export -d 'match[]=system.load.1'
|
echo '
|
||||||
|
{
|
||||||
|
"series": [
|
||||||
|
{
|
||||||
|
"host": "test.example.com",
|
||||||
|
"interval": 20,
|
||||||
|
"metric": "system.load.1",
|
||||||
|
"points": [[
|
||||||
|
0,
|
||||||
|
0.5
|
||||||
|
]],
|
||||||
|
"tags": [
|
||||||
|
"environment:test"
|
||||||
|
],
|
||||||
|
"type": "rate"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
' | curl -X POST --data-binary @- http://vminsert-host:8480/insert/0/datadog/api/v1/series
|
||||||
|
```
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
The imported data can be read via [export API](https://docs.victoriametrics.com/url-examples.html#apiv1export):
|
||||||
|
|
||||||
|
Single-node VictoriaMetrics:
|
||||||
|
|
||||||
|
<div class="with-copy" markdown="1">
|
||||||
|
|
||||||
|
```console
|
||||||
|
curl http://victoriametrics-host:8428/api/v1/export -d 'match[]=system.load.1'
|
||||||
|
```
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
Cluster version of VictoriaMetrics:
|
||||||
|
|
||||||
|
<div class="with-copy" markdown="1">
|
||||||
|
|
||||||
|
```console
|
||||||
|
curl http://vmselect-host:8481/select/0/prometheus/api/v1/export -d 'match[]=system.load.1'
|
||||||
```
|
```
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
@ -642,7 +694,7 @@ VictoriaMetrics accepts `round_digits` query arg for `/api/v1/query` and `/api/v
|
||||||
|
|
||||||
VictoriaMetrics accepts `limit` query arg for `/api/v1/labels` and `/api/v1/label/<labelName>/values` handlers for limiting the number of returned entries. For example, the query to `/api/v1/labels?limit=5` returns a sample of up to 5 unique labels, while ignoring the rest of labels. If the provided `limit` value exceeds the corresponding `-search.maxTagKeys` / `-search.maxTagValues` command-line flag values, then limits specified in the command-line flags are used.
|
VictoriaMetrics accepts `limit` query arg for `/api/v1/labels` and `/api/v1/label/<labelName>/values` handlers for limiting the number of returned entries. For example, the query to `/api/v1/labels?limit=5` returns a sample of up to 5 unique labels, while ignoring the rest of labels. If the provided `limit` value exceeds the corresponding `-search.maxTagKeys` / `-search.maxTagValues` command-line flag values, then limits specified in the command-line flags are used.
|
||||||
|
|
||||||
By default, VictoriaMetrics returns time series for the last 5 minutes from `/api/v1/series`, while the Prometheus API defaults to all time. Use `start` and `end` to select a different time range.
|
By default, VictoriaMetrics returns time series for the last 5 minutes from `/api/v1/series`, `/api/v1/labels` and `/api/v1/label/<labelName>/values`, while the Prometheus API defaults to all time. Explicitly set `start` and `end` to select the desired time range.
|
||||||
VictoriaMetrics accepts `limit` query arg for `/api/v1/series` handlers for limiting the number of returned entries. For example, the query to `/api/v1/series?limit=5` returns a sample of up to 5 series, while ignoring the rest. If the provided `limit` value exceeds the corresponding `-search.maxSeries` command-line flag values, then limits specified in the command-line flags are used.
|
VictoriaMetrics accepts `limit` query arg for `/api/v1/series` handlers for limiting the number of returned entries. For example, the query to `/api/v1/series?limit=5` returns a sample of up to 5 series, while ignoring the rest. If the provided `limit` value exceeds the corresponding `-search.maxSeries` command-line flag values, then limits specified in the command-line flags are used.
|
||||||
|
|
||||||
Additionally, VictoriaMetrics provides the following handlers:
|
Additionally, VictoriaMetrics provides the following handlers:
|
||||||
|
@ -1577,7 +1629,8 @@ The exceeded limits can be [monitored](#monitoring) with the following metrics:
|
||||||
|
|
||||||
These limits are approximate, so VictoriaMetrics can underflow/overflow the limit by a small percentage (usually less than 1%).
|
These limits are approximate, so VictoriaMetrics can underflow/overflow the limit by a small percentage (usually less than 1%).
|
||||||
|
|
||||||
See also more advanced [cardinality limiter in vmagent](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter).
|
See also more advanced [cardinality limiter in vmagent](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter)
|
||||||
|
and [cardinality explorer docs](#cardinality-explorer).
|
||||||
|
|
||||||
## Troubleshooting
|
## Troubleshooting
|
||||||
|
|
||||||
|
|
|
@ -33,7 +33,7 @@ requests_total{path="/", code="200"}
|
||||||
requests_total{path="/", code="403"}
|
requests_total{path="/", code="403"}
|
||||||
```
|
```
|
||||||
|
|
||||||
The meta-information - set of `labels` in curly braces - gives us a context for which `path` and with what `code`
|
The meta-information - a set of `labels` in curly braces - gives us a context for which `path` and with what `code`
|
||||||
the `request` was served. Label-value pairs are always of a `string` type. VictoriaMetrics data model is schemaless,
|
the `request` was served. Label-value pairs are always of a `string` type. VictoriaMetrics data model is schemaless,
|
||||||
which means there is no need to define metric names or their labels in advance. User is free to add or change ingested
|
which means there is no need to define metric names or their labels in advance. User is free to add or change ingested
|
||||||
metrics anytime.
|
metrics anytime.
|
||||||
|
@ -63,9 +63,9 @@ See [these docs](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinal
|
||||||
|
|
||||||
#### Raw samples
|
#### Raw samples
|
||||||
|
|
||||||
Every unique time series may consist of arbitrary number of `(value, timestamp)` data points (aka `raw samples`) sorted by `timestamp`.
|
Every unique time series may consist of an arbitrary number of `(value, timestamp)` data points (aka `raw samples`) sorted by `timestamp`.
|
||||||
The `value` is a [double-precision floating-point number](https://en.wikipedia.org/wiki/Double-precision_floating-point_format).
|
The `value` is a [double-precision floating-point number](https://en.wikipedia.org/wiki/Double-precision_floating-point_format).
|
||||||
The `timestamp` is a [unix timestamp](https://en.wikipedia.org/wiki/Unix_time) with millisecond precision.
|
The `timestamp` is a [Unix timestamp](https://en.wikipedia.org/wiki/Unix_time) with millisecond precision.
|
||||||
|
|
||||||
Below is an example of a single raw sample
|
Below is an example of a single raw sample
|
||||||
in [Prometheus text exposition format](https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format):
|
in [Prometheus text exposition format](https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format):
|
||||||
|
@ -108,8 +108,8 @@ The most common [MetricsQL](#metricsql) functions used with counters are:
|
||||||
time period specified in square brackets.
|
time period specified in square brackets.
|
||||||
For example, `increase(requests_total[1h])` shows the number of requests served over the last hour.
|
For example, `increase(requests_total[1h])` shows the number of requests served over the last hour.
|
||||||
|
|
||||||
It is OK to have fractional counters. For example, `request_duration_seconds_sum` counter may sum durations of all the requests.
|
It is OK to have fractional counters. For example, `request_duration_seconds_sum` counter may sum the durations of all the requests.
|
||||||
Every duration may have fractional value in seconds, e.g. `0.5` seconds. So the cumulative sum of all the request durations
|
Every duration may have a fractional value in seconds, e.g. `0.5` of a second. So the cumulative sum of all the request durations
|
||||||
may be fractional too.
|
may be fractional too.
|
||||||
|
|
||||||
It is recommended to put `_total`, `_sum` or `_count` suffix to `counter` metric names, so such metrics can be easily differentiated
|
It is recommended to put `_total`, `_sum` or `_count` suffix to `counter` metric names, so such metrics can be easily differentiated
|
||||||
|
@ -121,7 +121,7 @@ Gauge is used for measuring a value that can go up and down:
|
||||||
|
|
||||||
{% include img.html href="keyConcepts_gauge.png" %}
|
{% include img.html href="keyConcepts_gauge.png" %}
|
||||||
|
|
||||||
The metric `process_resident_memory_anon_bytes` on the graph shows memory usage of the application at every given time.
|
The metric `process_resident_memory_anon_bytes` on the graph shows the memory usage of the application at every given time.
|
||||||
It is changing frequently, going up and down showing how the process allocates and frees the memory.
|
It is changing frequently, going up and down showing how the process allocates and frees the memory.
|
||||||
In programming, `gauge` is a variable to which you **set** a specific value as it changes.
|
In programming, `gauge` is a variable to which you **set** a specific value as it changes.
|
||||||
|
|
||||||
|
@ -177,7 +177,7 @@ This query works in the following way:
|
||||||
2. The `sum(...) by (vmrange)` calculates per-bucket events by summing per-instance buckets
|
2. The `sum(...) by (vmrange)` calculates per-bucket events by summing per-instance buckets
|
||||||
with the same `vmrange` values.
|
with the same `vmrange` values.
|
||||||
|
|
||||||
3. The `histogram_quantile(0.99, ...)` calculates 99th percentile over `vmrange` buckets returned at the step 2.
|
3. The `histogram_quantile(0.99, ...)` calculates 99th percentile over `vmrange` buckets returned at step 2.
|
||||||
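For reference, the assembled query has roughly the following shape (the metric name and the `increase(...[5m])` inner step are assumptions, not the exact query from the docs):

```console
curl http://victoriametrics-host:8428/api/v1/query \
  -d 'query=histogram_quantile(0.99, sum(increase(request_duration_seconds_bucket[5m])) by (vmrange))'
```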
|
|
||||||
Histogram metric type exposes two additional counters ending with `_sum` and `_count` suffixes:
|
Histogram metric type exposes two additional counters ending with `_sum` and `_count` suffixes:
|
||||||
|
|
||||||
|
@ -227,7 +227,7 @@ function must be used for converting buckets with `vmrange` labels to buckets wi
|
||||||
Histograms are usually used for measuring the distribution of latency, sizes of elements (batch size, for example) etc. There are two
|
Histograms are usually used for measuring the distribution of latency, sizes of elements (batch size, for example) etc. There are two
|
||||||
implementations of a histogram supported by VictoriaMetrics:
|
implementations of a histogram supported by VictoriaMetrics:
|
||||||
|
|
||||||
1. [Prometheus histogram](https://prometheus.io/docs/practices/histograms/). The canonical histogram implementation
|
1. [Prometheus histogram](https://prometheus.io/docs/practices/histograms/). The canonical histogram implementation is
|
||||||
supported by most of
|
supported by most of
|
||||||
the [client libraries for metrics instrumentation](https://prometheus.io/docs/instrumenting/clientlibs/). Prometheus
|
the [client libraries for metrics instrumentation](https://prometheus.io/docs/instrumenting/clientlibs/). Prometheus
|
||||||
histogram requires a user to define ranges (`buckets`) statically.
|
histogram requires a user to define ranges (`buckets`) statically.
|
||||||
|
@ -263,7 +263,7 @@ The visualisation of summaries is pretty straightforward:
|
||||||
|
|
||||||
{% include img.html href="keyConcepts_summary.png" %}
|
{% include img.html href="keyConcepts_summary.png" %}
|
||||||
|
|
||||||
Such an approach makes summaries easier to use but also puts significant limitations comparing to [histograms](#histogram):
|
Such an approach makes summaries easier to use but also puts significant limitations compared to [histograms](#histogram):
|
||||||
|
|
||||||
- It is impossible to calculate quantile over multiple summary metrics, e.g. `sum(go_gc_duration_seconds{quantile="0.75"})`,
|
- It is impossible to calculate quantile over multiple summary metrics, e.g. `sum(go_gc_duration_seconds{quantile="0.75"})`,
|
||||||
`avg(go_gc_duration_seconds{quantile="0.75"})` or `max(go_gc_duration_seconds{quantile="0.75"})`
|
`avg(go_gc_duration_seconds{quantile="0.75"})` or `max(go_gc_duration_seconds{quantile="0.75"})`
|
||||||
|
@ -272,7 +272,7 @@ Such an approach makes summaries easier to use but also puts significant limitat
|
||||||
|
|
||||||
- It is impossible to calculate quantiles other than the already pre-calculated quantiles.
|
- It is impossible to calculate quantiles other than the already pre-calculated quantiles.
|
||||||
|
|
||||||
- It is impossible to calculate quantiles for measurements collected over arbitrary time range. Usually `summary`
|
- It is impossible to calculate quantiles for measurements collected over an arbitrary time range. Usually, `summary`
|
||||||
quantiles are calculated over a fixed time range such as the last 5 minutes.
|
quantiles are calculated over a fixed time range such as the last 5 minutes.
|
||||||
|
|
||||||
Summaries are usually used for tracking the pre-defined percentiles for latency, sizes of elements (batch size, for example) etc.
|
Summaries are usually used for tracking the pre-defined percentiles for latency, sizes of elements (batch size, for example) etc.
|
||||||
|
@ -280,7 +280,7 @@ Summaries are usually used for tracking the pre-defined percentiles for latency,
|
||||||
### Instrumenting application with metrics
|
### Instrumenting application with metrics
|
||||||
|
|
||||||
As was said at the beginning of the [types of metrics](#types-of-metrics) section, metric type defines how it was
|
As was said at the beginning of the [types of metrics](#types-of-metrics) section, metric type defines how it was
|
||||||
measured. VictoriaMetrics TSDB doesn't know about metric types, all it sees are metric names, labels, values, and timestamps.
|
measured. VictoriaMetrics TSDB doesn't know about metric types. All it sees are metric names, labels, values, and timestamps.
|
||||||
What are these metrics, what do they measure, and how - all this depends on the application which emits them.
|
What are these metrics, what do they measure, and how - all this depends on the application which emits them.
|
||||||
|
|
||||||
To instrument your application with metrics compatible with VictoriaMetrics we recommend
|
To instrument your application with metrics compatible with VictoriaMetrics we recommend
|
||||||
|
@ -292,18 +292,18 @@ VictoriaMetrics is also compatible with [Prometheus client libraries for metrics
|
||||||
#### Naming
|
#### Naming
|
||||||
|
|
||||||
We recommend following [Prometheus naming convention for metrics](https://prometheus.io/docs/practices/naming/). There
|
We recommend following [Prometheus naming convention for metrics](https://prometheus.io/docs/practices/naming/). There
|
||||||
are no strict restrictions, so any metric name and labels are be accepted by VictoriaMetrics.
|
are no strict restrictions, so any metric name and labels are accepted by VictoriaMetrics.
|
||||||
But the convention helps to keep names meaningful, descriptive and clear to other people.
|
But the convention helps to keep names meaningful, descriptive, and clear to other people.
|
||||||
Following convention is a good practice.
|
Following convention is a good practice.
|
||||||
|
|
||||||
#### Labels
|
#### Labels
|
||||||
|
|
||||||
Every measurement can contain an arbitrary number of `key="value"` labels. The good practice is to keep this number limited.
|
Every measurement can contain an arbitrary number of `key="value"` labels. The good practice is to keep this number limited.
|
||||||
Otherwise, it would be difficult to deal with measurements containing big number of labels.
|
Otherwise, it would be difficult to deal with measurements containing a big number of labels.
|
||||||
By default, VictoriaMetrics limits the number of labels per measurement to `30` and drops other labels.
|
By default, VictoriaMetrics limits the number of labels per measurement to `30` and drops other labels.
|
||||||
This limit can be changed via `-maxLabelsPerTimeseries` command-line flag if necessary (but this isn't recommended).
|
This limit can be changed via `-maxLabelsPerTimeseries` command-line flag if necessary (but this isn't recommended).
|
||||||
|
|
||||||
Every label value can contain arbitrary string value. The good practice is to use short and meaningful label values to
|
Every label value can contain an arbitrary string value. The good practice is to use short and meaningful label values to
|
||||||
describe the attribute of the metric, not to tell the story about it. For example, label-value pair
|
describe the attribute of the metric, not to tell the story about it. For example, label-value pair
|
||||||
`environment="prod"` is ok, but `log_message="long log message with a lot of details..."` is not ok. By default,
|
`environment="prod"` is ok, but `log_message="long log message with a lot of details..."` is not ok. By default,
|
||||||
VictoriaMetrics limits the label value size to 16kB. This limit can be changed via the `-maxLabelValueLen` command-line flag.
|
VictoriaMetrics limits the label value size to 16kB. This limit can be changed via the `-maxLabelValueLen` command-line flag.
|
||||||
|
@ -318,7 +318,7 @@ VictoriaMetrics supports both models used in modern monitoring applications: [pu
|
||||||
|
|
||||||
### Push model
|
### Push model
|
||||||
|
|
||||||
Client regularly sends the collected metrics to the server in push model:
|
Client regularly sends the collected metrics to the server in the push model:
|
||||||
|
|
||||||
{% include img.html href="keyConcepts_push_model.png" %}
|
{% include img.html href="keyConcepts_push_model.png" %}
|
||||||
|
|
||||||
|
@ -366,9 +366,9 @@ elaborating more on why Percona switched from pull to push model.
|
||||||
The cons of push protocol:
|
The cons of push protocol:
|
||||||
|
|
||||||
* Increased configuration complexity for monitored applications.
|
* Increased configuration complexity for monitored applications.
|
||||||
Every application needs te be individually configured with the address of the monitoring system
|
Every application needs to be individually configured with the address of the monitoring system
|
||||||
for metrics delivery. It also needs to be configured with the interval between metric pushes
|
for metrics delivery. It also needs to be configured with the interval between metric pushes
|
||||||
and the strategy on metric delivery failure.
|
and the strategy in case of metric delivery failure.
|
||||||
* Non-trivial setup for metrics' delivery into multiple monitoring systems.
|
* Non-trivial setup for metrics' delivery into multiple monitoring systems.
|
||||||
* It may be hard to tell whether the application went down or just stopped sending metrics for a different reason.
|
* It may be hard to tell whether the application went down or just stopped sending metrics for a different reason.
|
||||||
* Applications can overload the monitoring system by pushing metrics at too short intervals.
|
* Applications can overload the monitoring system by pushing metrics at too short intervals.
|
||||||
|
@ -394,12 +394,12 @@ The pros of the pull model:
|
||||||
* Easier to debug - VictoriaMetrics knows about all the monitored applications (aka `scrape targets`).
|
* Easier to debug - VictoriaMetrics knows about all the monitored applications (aka `scrape targets`).
|
||||||
The `up == 0` query instantly shows unavailable scrape targets.
|
The `up == 0` query instantly shows unavailable scrape targets.
|
||||||
The actual information about scrape targets is available at `http://victoriametrics:8428/targets` and `http://vmagent:8429/targets`.
|
The actual information about scrape targets is available at `http://victoriametrics:8428/targets` and `http://vmagent:8429/targets`.
|
||||||
* Monitoring system controls the frequency of metrics' scrape, so it is easier to control its' load.
|
* Monitoring system controls the frequency of metrics' scrape, so it is easier to control its load.
|
||||||
* Applications aren't aware of the monitoring system and don't need to implement the logic for metrics' delivery.
|
* Applications aren't aware of the monitoring system and don't need to implement the logic for metrics delivery.
|
||||||
|
|
||||||
The cons of the pull model:
|
The cons of the pull model:
|
||||||
|
|
||||||
* Harder security setup - monitoring system needs have access to applications it monitors.
|
* Harder security setup - monitoring system needs to have access to applications it monitors.
|
||||||
* Pull model needs non-trivial [service discovery schemes](https://docs.victoriametrics.com/sd_configs.html).
|
* Pull model needs non-trivial [service discovery schemes](https://docs.victoriametrics.com/sd_configs.html).
|
||||||
|
|
||||||
### Common approaches for data collection
|
### Common approaches for data collection
|
||||||
|
@ -426,7 +426,7 @@ VictoriaMetrics components allow building more advanced topologies. For example,
|
||||||
|
|
||||||
{% include img.html href="keyConcepts_two_dcs.png" %}
|
{% include img.html href="keyConcepts_two_dcs.png" %}
|
||||||
|
|
||||||
VictoriaMetrics in this example the may be either [single-node VictoriaMetrics](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html)
|
VictoriaMetrics in this example may be either [single-node VictoriaMetrics](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html)
|
||||||
or [VictoriaMetrics Cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html). Vmagent also allows
|
or [VictoriaMetrics Cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html). Vmagent also allows
|
||||||
[replicating the same data to multiple destinations](https://docs.victoriametrics.com/vmagent.html#replication-and-high-availability).
|
[replicating the same data to multiple destinations](https://docs.victoriametrics.com/vmagent.html#replication-and-high-availability).
|
||||||
|
|
||||||
|
@ -436,7 +436,7 @@ VictoriaMetrics provides
|
||||||
an [HTTP API](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#prometheus-querying-api-usage)
|
an [HTTP API](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#prometheus-querying-api-usage)
|
||||||
for serving read queries. The API is used in various integrations such as
|
for serving read queries. The API is used in various integrations such as
|
||||||
[Grafana](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#grafana-setup). The same API is also used by
|
[Grafana](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#grafana-setup). The same API is also used by
|
||||||
[VMUI](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#vmui) - graphical User Interface for querying
|
[VMUI](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#vmui) - a graphical User Interface for querying
|
||||||
and visualizing metrics.
|
and visualizing metrics.
|
||||||
|
|
||||||
The API consists of two main handlers for serving [instant queries](#instant-query) and [range queries](#range-query).
|
The API consists of two main handlers for serving [instant queries](#instant-query) and [range queries](#range-query).
|
||||||
|
@ -455,9 +455,9 @@ Params:
|
||||||
* `time` - optional timestamp when to evaluate the `query`. If `time` is skipped, then the current timestamp is used.
|
* `time` - optional timestamp when to evaluate the `query`. If `time` is skipped, then the current timestamp is used.
|
||||||
The `time` param can be specified in the following formats:
|
The `time` param can be specified in the following formats:
|
||||||
* [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) such as `2022-08-10T12:45:43.000Z`.
|
* [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) such as `2022-08-10T12:45:43.000Z`.
|
||||||
* [Unix timestamp](https://en.wikipedia.org/wiki/Unix_time) in seconds. It can contains fractional part for millisecond precision.
|
* [Unix timestamp](https://en.wikipedia.org/wiki/Unix_time) in seconds. It can contain a fractional part for millisecond precision.
|
||||||
* [Relative duration](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-durations)
|
* [Relative duration](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-durations)
|
||||||
comparing to the current timestamp. For example, `-1h` means `one hour before the current time`.
|
compared to the current timestamp. For example, `-1h` means `one hour before the current time`.
|
||||||
* `step` - optional max lookback window for searching for raw samples when executing the `query`.
|
* `step` - optional max lookback window for searching for raw samples when executing the `query`.
|
||||||
If `step` is skipped, then it is set to `5m` (5 minutes) by default.
|
If `step` is skipped, then it is set to `5m` (5 minutes) by default.
|
||||||
|
|
||||||
|
@ -517,8 +517,8 @@ curl "http://<victoria-metrics-addr>/api/v1/query?query=foo_bar&time=2022-05-10T
|
||||||
|
|
||||||
In response, VictoriaMetrics returns a single sample-timestamp pair with a value of `3` for the series
|
In response, VictoriaMetrics returns a single sample-timestamp pair with a value of `3` for the series
|
||||||
`foo_bar` at the given moment of time `2022-05-10 10:03`. But, if we take a look at the original data sample again,
|
`foo_bar` at the given moment of time `2022-05-10 10:03`. But, if we take a look at the original data sample again,
|
||||||
we'll see that there is no a raw sample at `2022-05-10 10:03`. What happens here is if there is no a raw sample at the
|
we'll see that there is no raw sample at `2022-05-10 10:03`. What happens here is that, since there is no raw sample at the
|
||||||
requested timestamp, VictoriaMetrics will try to locate the closest sample on the left to the requested timestamp:
|
requested timestamp, VictoriaMetrics tries to locate the closest sample to the left of the requested timestamp:
|
||||||
|
|
||||||
<p style="text-align: center">
|
<p style="text-align: center">
|
||||||
<a href="keyConcepts_instant_query.png" target="_blank">
|
<a href="keyConcepts_instant_query.png" target="_blank">
|
||||||
|
@ -550,9 +550,9 @@ Params:
|
||||||
* `start` - the starting timestamp of the time range for `query` evaluation.
|
* `start` - the starting timestamp of the time range for `query` evaluation.
|
||||||
The `start` param can be specified in the following formats:
|
The `start` param can be specified in the following formats:
|
||||||
* [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) such as `2022-08-10T12:45:43.000Z`.
|
* [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) such as `2022-08-10T12:45:43.000Z`.
|
||||||
* [Unix timestamp](https://en.wikipedia.org/wiki/Unix_time) in seconds. It can contains fractional part for millisecond precision.
|
* [Unix timestamp](https://en.wikipedia.org/wiki/Unix_time) in seconds. It can contain a fractional part for millisecond precision.
|
||||||
* [Relative duration](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-durations)
|
* [Relative duration](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-durations)
|
||||||
comparing to the current timestamp. For example, `-1h` means `one hour before the current time`.
|
compared to the current timestamp. For example, `-1h` means `one hour before the current time`.
|
||||||
* `end` - the ending timestamp of the time range for `query` evaluation.
|
* `end` - the ending timestamp of the time range for `query` evaluation.
|
||||||
If the `end` isn't set, then the `end` is automatically set to the current time.
|
If the `end` isn't set, then the `end` is automatically set to the current time.
|
||||||
* `step` - the [interval](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-durations) between datapoints,
|
* `step` - the [interval](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-durations) between datapoints,
|
||||||
|
@ -560,7 +560,7 @@ Params:
|
||||||
The `query` is executed at `start`, `start+step`, `start+2*step`, ..., `end` timestamps.
|
The `query` is executed at `start`, `start+step`, `start+2*step`, ..., `end` timestamps.
|
||||||
If the `step` isn't set, then it is automatically set to `5m` (5 minutes).
|
If the `step` isn't set, then it is automatically set to `5m` (5 minutes).
|
||||||
|
|
||||||
To get the values of `foo_bar` on time range from `2022-05-10 09:59:00` to `2022-05-10 10:17:00`, in VictoriaMetrics we
|
To get the values of `foo_bar` on the time range from `2022-05-10 09:59:00` to `2022-05-10 10:17:00`, in VictoriaMetrics we
|
||||||
need to issue a range query:
|
need to issue a range query:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
|
@ -665,7 +665,7 @@ this request in VictoriaMetrics the graph will be shown as the following:
|
||||||
</a>
|
</a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
The blue dotted lines on the pic are the moments when instant query was executed. Since instant query retains the
|
The blue dotted lines on the pic are the moments when the instant query was executed. Since instant query retains the
|
||||||
ability to locate the missing point, the graph contains two types of points: `real` and `ephemeral` data
|
ability to locate the missing point, the graph contains two types of points: `real` and `ephemeral` data
|
||||||
points. `ephemeral` data point always repeats the left closest raw sample (see red arrow on the pic above).
|
points. `ephemeral` data point always repeats the left closest raw sample (see red arrow on the pic above).
|
||||||
|
|
||||||
|
@ -692,14 +692,14 @@ useful in the following scenarios:
|
||||||
* Correlate changes between multiple metrics on the time interval;
|
* Correlate changes between multiple metrics on the time interval;
|
||||||
* Observe trends and dynamics of the metric change.
|
* Observe trends and dynamics of the metric change.
|
||||||
|
|
||||||
If you need exporting raw samples from VictoriaMetrics, then take a look at [export APIs](https://docs.victoriametrics.com/#how-to-export-time-series).
|
If you need to export raw samples from VictoriaMetrics, then take a look at [export APIs](https://docs.victoriametrics.com/#how-to-export-time-series).
|
||||||
|
|
||||||
### MetricsQL
|
### MetricsQL
|
||||||
|
|
||||||
VictoriaMetrics provides a special query language for executing read queries - [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
|
VictoriaMetrics provides a special query language for executing read queries - [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
|
||||||
It is a [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics)-like query language with a powerful set of
|
It is a [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics)-like query language with a powerful set of
|
||||||
functions and features for working specifically with time series data. MetricsQL is backwards-compatible with PromQL,
|
functions and features for working specifically with time series data. MetricsQL is backward-compatible with PromQL,
|
||||||
so it shares most of the query concepts. The basics concepts for PromQL and MetricsQL are
|
so it shares most of the query concepts. The basic concepts for PromQL and MetricsQL are
|
||||||
described [here](https://valyala.medium.com/promql-tutorial-for-beginners-9ab455142085).
|
described [here](https://valyala.medium.com/promql-tutorial-for-beginners-9ab455142085).
|
||||||
|
|
||||||
#### Filtering
|
#### Filtering
|
||||||
|
@ -805,7 +805,7 @@ process_resident_memory_bytes > 100*1024*1024
|
||||||
|
|
||||||
#### Aggregation and grouping functions
|
#### Aggregation and grouping functions
|
||||||
|
|
||||||
MetricsQL allows aggregating and grouping time series. Time series are grouped by the given set of labels and then the
|
MetricsQL allows aggregating and grouping of time series. Time series are grouped by the given set of labels and then the
|
||||||
given aggregation function is applied individually per each group. For instance, the following query returns
|
given aggregation function is applied individually per each group. For instance, the following query returns
|
||||||
summary memory usage for each `job`:
|
summary memory usage for each `job`:
|
||||||
|
|
||||||
|
@ -838,7 +838,7 @@ as [duration](https://prometheus.io/docs/prometheus/latest/querying/basics/#time
|
||||||
In this case VictoriaMetrics uses the specified lookbehind window - `5m` (5 minutes) - for calculating the average per-second increase rate.
|
In this case VictoriaMetrics uses the specified lookbehind window - `5m` (5 minutes) - for calculating the average per-second increase rate.
|
||||||
Bigger lookbehind windows usually lead to smoother graphs.
|
Bigger lookbehind windows usually lead to smoother graphs.
|
||||||
|
|
||||||
`rate` strips metric name while leaving all the labels for the inner time series. If you need keeping the metric name,
|
`rate` strips metric name while leaving all the labels for the inner time series. If you need to keep the metric name,
|
||||||
then add [keep_metric_names](https://docs.victoriametrics.com/MetricsQL.html#keep_metric_names) modifier
|
then add [keep_metric_names](https://docs.victoriametrics.com/MetricsQL.html#keep_metric_names) modifier
|
||||||
after the `rate(..)`. For example, the following query leaves metric names after calculating the `rate()`:
|
after the `rate(..)`. For example, the following query leaves metric names after calculating the `rate()`:
|
||||||
|
|
||||||
|
@ -846,7 +846,7 @@ after the `rate(..)`. For example, the following query leaves metric names after
|
||||||
rate(node_network_receive_bytes_total) keep_metric_names
|
rate(node_network_receive_bytes_total) keep_metric_names
|
||||||
```
|
```
|
||||||
|
|
||||||
`rate()` must be apllied only to [counters](#counter). The result of applying the `rate()` to [gauge](#gauge) is undefined.
|
`rate()` must be applied only to [counters](#counter). The result of applying the `rate()` to [gauge](#gauge) is undefined.
|
||||||
|
|
||||||
### Visualizing time series
|
### Visualizing time series
|
||||||
|
|
||||||
|
@ -885,4 +885,4 @@ VictoriaMetrics supports data deduplication. See [these docs](https://docs.victo
|
||||||
|
|
||||||
### Downsampling
|
### Downsampling
|
||||||
|
|
||||||
VictoriaMetrics supports data downsampling - see [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#downsampling).
|
VictoriaMetrics supports data downsampling. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#downsampling).
|
||||||
|
|
|
@ -101,6 +101,7 @@ Additional information:
|
||||||
|
|
||||||
* [How to export time series](https://docs.victoriametrics.com/#how-to-export-time-series)
|
* [How to export time series](https://docs.victoriametrics.com/#how-to-export-time-series)
|
||||||
* [How to import time series](https://docs.victoriametrics.com/#how-to-import-time-series-data)
|
* [How to import time series](https://docs.victoriametrics.com/#how-to-import-time-series-data)
|
||||||
|
* [How to export data in JSON line format](https://docs.victoriametrics.com/#how-to-export-data-in-json-line-format)
|
||||||
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)
|
* [URL format for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)
|
||||||
|
|
||||||
## /api/v1/export/csv
|
## /api/v1/export/csv
|
||||||
|
@ -267,7 +268,7 @@ Additional information:
|
||||||
|
|
||||||
## /api/v1/labels
|
## /api/v1/labels
|
||||||
|
|
||||||
**Get a list of label names**
|
**Get a list of label names at the given time range**
|
||||||
|
|
||||||
Single-node VictoriaMetrics:
|
Single-node VictoriaMetrics:
|
||||||
<div class="with-copy" markdown="1">
|
<div class="with-copy" markdown="1">
|
||||||
|
@ -287,6 +288,8 @@ curl http://<vmselect>:8481/select/0/prometheus/api/v1/labels
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
By default, VictoriaMetrics returns labels seen during the last 5 minutes. An arbitrary time range can be set via the `start` and `end` query args.
|
||||||
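For example, a sketch of widening the lookback window (assuming relative durations are accepted for `start`, as in the query APIs):

```console
curl http://localhost:8428/api/v1/labels -d 'start=-3h'
```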
|
|
||||||
Additional information:
|
Additional information:
|
||||||
* [Prometheus querying API usage](https://docs.victoriametrics.com/#prometheus-querying-api-usage)
|
* [Prometheus querying API usage](https://docs.victoriametrics.com/#prometheus-querying-api-usage)
|
||||||
* [Querying label values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values)
|
* [Querying label values](https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values)
|
||||||
|
@ -294,7 +297,7 @@ Additional information:
|
||||||
|
|
||||||
## /api/v1/label/.../values
|
## /api/v1/label/.../values
|
||||||
|
|
||||||
**Get a list of values for a particular label**
|
**Get a list of values for a particular label on the given time range**
|
||||||
|
|
||||||
Single-node VictoriaMetrics:
|
Single-node VictoriaMetrics:
|
||||||
<div class="with-copy" markdown="1">
|
<div class="with-copy" markdown="1">
|
||||||
|
@ -314,6 +317,8 @@ curl http://<vmselect>:8481/select/0/prometheus/api/v1/label/job/values
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
By default, VictoriaMetrics returns label values seen during the last 5 minutes. An arbitrary time range can be set via the `start` and `end` query args.
|
||||||
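The same `start`/`end` args apply here; a sketch under the same assumption about relative durations:

```console
curl http://localhost:8428/api/v1/label/job/values -d 'start=-3h'
```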
|
|
||||||
Additional information:
|
Additional information:
|
||||||
* [Prometheus querying API usage](https://docs.victoriametrics.com/#prometheus-querying-api-usage)
|
* [Prometheus querying API usage](https://docs.victoriametrics.com/#prometheus-querying-api-usage)
|
||||||
* [Getting label names](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names)
|
* [Getting label names](https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names)
|
||||||
|
@ -377,7 +382,7 @@ Additional information:
|
||||||
|
|
||||||
## /api/v1/series
|
## /api/v1/series
|
||||||
|
|
||||||
**Returns series names with their labels**
|
**Returns series names with their labels on the given time range**
|
||||||
|
|
||||||
Single-node VictoriaMetrics:
|
Single-node VictoriaMetrics:
|
||||||
<div class="with-copy" markdown="1">
|
<div class="with-copy" markdown="1">
|
||||||
|
@ -397,6 +402,8 @@ curl http://<vmselect>:8481/select/0/prometheus/api/v1/series -d 'match[]=vm_htt
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
By default, VictoriaMetrics returns time series seen during the last 5 minutes. An arbitrary time range can be set via the `start` and `end` query args.
|
||||||
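A sketch of widening the lookback window (the metric name is a placeholder; same assumption about relative durations for `start`):

```console
curl http://localhost:8428/api/v1/series -d 'match[]=up' -d 'start=-3h'
```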
|
|
||||||
Additional information:
|
Additional information:
|
||||||
* [Prometheus querying API usage](https://docs.victoriametrics.com/#prometheus-querying-api-usage)
|
* [Prometheus querying API usage](https://docs.victoriametrics.com/#prometheus-querying-api-usage)
|
||||||
* [Finding series by label matchers](https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers)
|
* [Finding series by label matchers](https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers)
|
||||||
|
|
|
@ -629,6 +629,8 @@ Both limits can be set simultaneously. If any of these limits is reached, then s
|
||||||
|
|
||||||
These limits are approximate, so `vmagent` can underflow/overflow the limit by a small percentage (usually less than 1%).
|
These limits are approximate, so `vmagent` can underflow/overflow the limit by a small percentage (usually less than 1%).
|
||||||
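A sketch of setting both limits when starting `vmagent` (flag names per the vmagent cardinality limiter docs; the URL and values are arbitrary):

```console
/path/to/vmagent -remoteWrite.url=http://victoriametrics-host:8428/api/v1/write \
  -remoteWrite.maxHourlySeries=100000 \
  -remoteWrite.maxDailySeries=300000
```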
|
|
||||||
|
See also [cardinality explorer docs](https://docs.victoriametrics.com/#cardinality-explorer).
|
||||||
|
|
||||||
## Monitoring
|
## Monitoring
|
||||||
|
|
||||||
`vmagent` exports various metrics in Prometheus exposition format at `http://vmagent-host:8429/metrics` page. We recommend setting up regular scraping of this page
|
`vmagent` exports various metrics in Prometheus exposition format at `http://vmagent-host:8429/metrics` page. We recommend setting up regular scraping of this page
|
||||||
|
|
|
@ -525,6 +525,74 @@ To avoid such situation try to filter out VM process metrics via `--vm-native-fi
|
||||||
Instead, use [relabeling in VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl/issues/4#issuecomment-683424375).
|
Instead, use [relabeling in VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl/issues/4#issuecomment-683424375).
|
||||||
5. When importing to or from the cluster version, remember to use the correct [URL format](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)
|
5. When importing to or from the cluster version, remember to use the correct [URL format](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)
|
||||||
and to specify the `accountID` param.
|
and to specify the `accountID` param.
|
||||||
|
6. When migrating large volumes of data, it might be useful to use the `--vm-native-step-interval` flag to split the single process into smaller steps.
|
||||||
|
|
||||||
|
#### Using time-based chunking of migration
|
||||||
|
|
||||||
|
It is possible to split the migration process into a set of smaller batches based on time. This is especially useful when migrating large volumes of data, as it adds an indication of progress and the ability to restore the process from a certain point in case of failure.
|
||||||
|
|
||||||
|
To use this you need to specify `--vm-native-step-interval` flag. Supported values are: `month`, `day`, `hour`.
|
||||||
|
Note that in order to use this it is required `--vm-native-filter-time-start` to be set to calculate time ranges for export process.
|
||||||
|
|
||||||
|
Every range is being processed independently, which means that:
|
||||||
|
- after range processing is finished all data within range is migrated
|
||||||
|
- if process fails on one of stages it is guaranteed that data of prior stages is already written, so it is possible to restart process starting from failed range
|
||||||
|
|
||||||
|
It is recommended using the `month` step when migrating the data over multiple months, since the migration with `day` and `hour` steps may take longer time to complete
|
||||||
|
because of additional overhead.
|
||||||
|
|
||||||
|
Usage example:
|
||||||
|
```console
|
||||||
|
./vmctl vm-native
|
||||||
|
--vm-native-filter-time-start 2022-06-17T00:07:00Z \
|
||||||
|
--vm-native-filter-time-end 2022-10-03T00:07:00Z \
|
||||||
|
--vm-native-src-addr http://localhost:8428 \
|
||||||
|
--vm-native-dst-addr http://localhost:8528 \
|
||||||
|
--vm-native-step-interval=month
|
||||||
|
VictoriaMetrics Native import mode
|
||||||
|
2022/08/30 19:48:24 Processing range 1/5: 2022-06-17T00:07:00Z - 2022-06-30T23:59:59Z
|
||||||
|
2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
|
||||||
|
filter: match[]={__name__!=""}
|
||||||
|
start: 2022-06-17T00:07:00Z
|
||||||
|
end: 2022-06-30T23:59:59Z
|
||||||
|
Initing import process to "http://localhost:8428":
|
||||||
|
2022/08/30 19:48:24 Import finished!
|
||||||
|
Total: 16 B ↗ Speed: 28.89 KiB p/s
|
||||||
|
2022/08/30 19:48:24 Processing range 2/5: 2022-07-01T00:00:00Z - 2022-07-31T23:59:59Z
|
||||||
|
2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
|
||||||
|
filter: match[]={__name__!=""}
|
||||||
|
start: 2022-07-01T00:00:00Z
|
||||||
|
end: 2022-07-31T23:59:59Z
|
||||||
|
Initing import process to "http://localhost:8428":
|
||||||
|
2022/08/30 19:48:24 Import finished!
|
||||||
|
Total: 16 B ↗ Speed: 164.35 KiB p/s
|
||||||
|
2022/08/30 19:48:24 Processing range 3/5: 2022-08-01T00:00:00Z - 2022-08-31T23:59:59Z
|
||||||
|
2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
|
||||||
|
filter: match[]={__name__!=""}
|
||||||
|
start: 2022-08-01T00:00:00Z
|
||||||
|
end: 2022-08-31T23:59:59Z
|
||||||
|
Initing import process to "http://localhost:8428":
|
||||||
|
2022/08/30 19:48:24 Import finished!
|
||||||
|
Total: 16 B ↗ Speed: 191.42 KiB p/s
|
||||||
|
2022/08/30 19:48:24 Processing range 4/5: 2022-09-01T00:00:00Z - 2022-09-30T23:59:59Z
|
||||||
|
2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
|
||||||
|
filter: match[]={__name__!=""}
|
||||||
|
start: 2022-09-01T00:00:00Z
|
||||||
|
end: 2022-09-30T23:59:59Z
|
||||||
|
Initing import process to "http://localhost:8428":
|
||||||
|
2022/08/30 19:48:24 Import finished!
|
||||||
|
Total: 16 B ↗ Speed: 141.04 KiB p/s
|
||||||
|
2022/08/30 19:48:24 Processing range 5/5: 2022-10-01T00:00:00Z - 2022-10-03T00:07:00Z
|
||||||
|
2022/08/30 19:48:24 Initing export pipe from "http://localhost:8428" with filters:
|
||||||
|
filter: match[]={__name__!=""}
|
||||||
|
start: 2022-10-01T00:00:00Z
|
||||||
|
end: 2022-10-03T00:07:00Z
|
||||||
|
Initing import process to "http://localhost:8428":
|
||||||
|
2022/08/30 19:48:24 Import finished!
|
||||||
|
Total: 16 B ↗ Speed: 186.32 KiB p/s
|
||||||
|
2022/08/30 19:48:24 Total time: 12.680582ms
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
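The month ranges printed in the example above follow a simple pattern: the first range runs from the requested start time to the end of that calendar month, the following ranges cover whole months, and the last range ends at the requested end time. The Go sketch below is an editorial illustration of that chunking for `--vm-native-step-interval=month`; it is not vmctl's actual implementation, and it uses half-open ranges rather than the inclusive end timestamps printed by vmctl:

```go
package main

import (
	"fmt"
	"time"
)

// splitByMonth splits [start, end) into calendar-month chunks, mirroring the
// five ranges reported by the usage example above.
func splitByMonth(start, end time.Time) [][2]time.Time {
	var ranges [][2]time.Time
	cur := start
	for cur.Before(end) {
		// The first day of the next calendar month.
		next := time.Date(cur.Year(), cur.Month(), 1, 0, 0, 0, 0, time.UTC).AddDate(0, 1, 0)
		if next.After(end) {
			next = end
		}
		ranges = append(ranges, [2]time.Time{cur, next})
		cur = next
	}
	return ranges
}

func main() {
	start, _ := time.Parse(time.RFC3339, "2022-06-17T00:07:00Z")
	end, _ := time.Parse(time.RFC3339, "2022-10-03T00:07:00Z")
	for i, r := range splitByMonth(start, end) {
		fmt.Printf("range %d: %s - %s\n", i+1, r[0].Format(time.RFC3339), r[1].Format(time.RFC3339))
	}
}
```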
 ## Verifying exported blocks from VictoriaMetrics

go.mod (20 changed lines)
@@ -11,7 +11,7 @@ require (
 	github.com/VictoriaMetrics/fasthttp v1.1.0
 	github.com/VictoriaMetrics/metrics v1.22.2
 	github.com/VictoriaMetrics/metricsql v0.44.1
-	github.com/aws/aws-sdk-go v1.44.91
+	github.com/aws/aws-sdk-go v1.44.93
 	github.com/cespare/xxhash/v2 v2.1.2
 	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
@@ -29,30 +29,30 @@ require (
 	github.com/oklog/ulid v1.3.1
 	github.com/prometheus/common v0.37.0 // indirect
 	github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
-	github.com/urfave/cli/v2 v2.14.0
+	github.com/urfave/cli/v2 v2.14.1
 	github.com/valyala/fastjson v1.6.3
 	github.com/valyala/fastrand v1.1.0
 	github.com/valyala/fasttemplate v1.2.1
 	github.com/valyala/gozstd v1.17.0
 	github.com/valyala/quicktemplate v1.7.0
-	golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b
+	golang.org/x/net v0.0.0-20220907135653-1e95f45603a7
 	golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094
-	golang.org/x/sys v0.0.0-20220829200755-d48e67d00261
+	golang.org/x/sys v0.0.0-20220908150016-7ac13a9a928d
-	google.golang.org/api v0.94.0
+	google.golang.org/api v0.95.0
 	gopkg.in/yaml.v2 v2.4.0
 )

 require (
 	cloud.google.com/go v0.104.0 // indirect
 	cloud.google.com/go/compute v1.9.0 // indirect
-	cloud.google.com/go/iam v0.3.0 // indirect
+	cloud.google.com/go/iam v0.4.0 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/go-kit/log v0.2.1 // indirect
 	github.com/go-logfmt/logfmt v0.5.1 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/google/go-cmp v0.5.8 // indirect
+	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
 	github.com/googleapis/gax-go/v2 v2.5.1 // indirect
@@ -71,11 +71,11 @@ require (
 	go.opencensus.io v0.23.0 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 // indirect
-	golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde // indirect
+	golang.org/x/sync v0.0.0-20220907140024-f12130a52804 // indirect
 	golang.org/x/text v0.3.7 // indirect
-	golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
+	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20220902135211-223410557253 // indirect
+	google.golang.org/genproto v0.0.0-20220908141613-51c1cc9bc6d0 // indirect
 	google.golang.org/grpc v1.49.0 // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 )
go.sum (37 changed lines)
@@ -49,8 +49,9 @@ cloud.google.com/go/compute v1.9.0 h1:ED/FP4xv8GJw63v556/ASNc1CeeLUO2Bs8nzaHchkH
 cloud.google.com/go/compute v1.9.0/go.mod h1:lWv1h/zUWTm/LozzfTJhBSkd6ShQq8la8VeeuOEGxfY=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc=
 cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
+cloud.google.com/go/iam v0.4.0 h1:YBYU00SCDzZJdHqVc4I5d6lsklcYIjQZa1YmEz4jlSE=
+cloud.google.com/go/iam v0.4.0/go.mod h1:cbaZxyScUhxl7ZAkNWiALgihfP75wS/fUsVNaa1r3vA=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -147,8 +148,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
 github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.44.91 h1:SRWmuX7PTyhBdLuvSfM7KWrWISJsrRsUPcFDSFduRxY=
-github.com/aws/aws-sdk-go v1.44.91/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.44.93 h1:hAgd9fuaptBatSft27/5eBMdcA8+cIMqo96/tZ6rKl8=
+github.com/aws/aws-sdk-go v1.44.93/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -431,8 +432,9 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
-github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -826,8 +828,8 @@ github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW
 github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
 github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
 github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli/v2 v2.14.0 h1:sFRL29Dm9JhXSMYb96raDeo/Q/JRyPXPs8u+4CkMlI8=
-github.com/urfave/cli/v2 v2.14.0/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
+github.com/urfave/cli/v2 v2.14.1 h1:0Sx+C9404t2+DPuIJ3UpZFOEFhNG3wPxMj7uZHyZKFA=
+github.com/urfave/cli/v2 v2.14.1/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
 github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
 github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
 github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
@@ -1005,8 +1007,8 @@ golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su
 golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b h1:ZmngSVLe/wycRns9MKikG9OWIEjGcGAkacif7oYQaUY=
-golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20220907135653-1e95f45603a7 h1:1WGATo9HAhkWMbfyuVU0tEFP88OIkUvwaHFveQPvzCQ=
+golang.org/x/net v0.0.0-20220907135653-1e95f45603a7/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1042,8 +1044,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde h1:ejfdSekXMDxDLbRrJMwUk6KnSLZ2McaUCVcIKM+N6jc=
-golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220907140024-f12130a52804 h1:0SH2R3f1b1VmIMG7BXbEZCBUu2dKmHschSmjqGUrW8A=
+golang.org/x/sync v0.0.0-20220907140024-f12130a52804/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1139,8 +1141,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 h1:v6hYoSR9T5oet+pMXwUWkbiVqx/63mlHjefrHmxwfeY=
-golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908150016-7ac13a9a928d h1:RoyzQTK76Rktm3p4xyZslc8T8I1tBz4UEjZCzeh57mM=
+golang.org/x/sys v0.0.0-20220908150016-7ac13a9a928d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1238,8 +1240,9 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0=
 golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
 gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
 gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
 gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
@@ -1286,8 +1289,8 @@ google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69
 google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
 google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
 google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
-google.golang.org/api v0.94.0 h1:KtKM9ru3nzQioV1HLlUf1cR7vMYJIpgls5VhAYQXIwA=
-google.golang.org/api v0.94.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
+google.golang.org/api v0.95.0 h1:d1c24AAS01DYqXreBeuVV7ewY/U8Mnhh47pwtsgVtYg=
+google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1379,8 +1382,8 @@ google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP
 google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
 google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
 google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
-google.golang.org/genproto v0.0.0-20220902135211-223410557253 h1:vXJMM8Shg7TGaYxZsQ++A/FOSlbDmDtWhS/o+3w/hj4=
-google.golang.org/genproto v0.0.0-20220902135211-223410557253/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220908141613-51c1cc9bc6d0 h1:bMz0aY2wd9TwUp9M7QfjBWuQqaFD/ZaTtvDpPDCo2Ow=
+google.golang.org/genproto v0.0.0-20220908141613-51c1cc9bc6d0/go.mod h1:rQWNQYp1kbHR3+n5cARSTCF5rlJOttUn8yIhRklGAWQ=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@@ -240,7 +240,7 @@ var bbPool bytesutil.ByteBufferPool
 // EnsureNonDecreasingSequence makes sure the first item in a is vMin, the last
 // item in a is vMax and all the items in a are non-decreasing.
 //
-// If this isn't the case the a is fixed accordingly.
+// If this isn't the case then a is fixed accordingly.
 func EnsureNonDecreasingSequence(a []int64, vMin, vMax int64) {
 	if vMax < vMin {
 		logger.Panicf("BUG: vMax cannot be smaller than vMin; got %d vs %d", vMax, vMin)
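The contract documented in the comment above can be illustrated with a small standalone sketch: clamp every item into `[vMin, vMax]`, pin the endpoints, and raise any item that breaks monotonicity. This is an editorial reference implementation of the documented behaviour only; it is not the library's actual `EnsureNonDecreasingSequence` code, which may use a different algorithm:

```go
package main

import "fmt"

// ensureNonDecreasingSequence is an illustrative re-statement of the documented
// contract: the first item becomes vMin, the last item becomes vMax, and the
// items in between are made non-decreasing within [vMin, vMax].
func ensureNonDecreasingSequence(a []int64, vMin, vMax int64) {
	if len(a) == 0 || vMax < vMin {
		return // the real function panics when vMax < vMin
	}
	// Clamp every item into [vMin, vMax].
	for i, v := range a {
		if v < vMin {
			a[i] = vMin
		} else if v > vMax {
			a[i] = vMax
		}
	}
	// Pin the first item and raise any item that falls below its predecessor.
	a[0] = vMin
	for i := 1; i < len(a); i++ {
		if a[i] < a[i-1] {
			a[i] = a[i-1]
		}
	}
	// Pin the last item; it cannot break monotonicity since all items are <= vMax.
	a[len(a)-1] = vMax
}

func main() {
	a := []int64{15, 10, 40, 30, 50}
	ensureNonDecreasingSequence(a, 12, 45)
	fmt.Println(a) // [12 12 40 40 45]
}
```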
@@ -2,7 +2,6 @@ package storage

 import (
 	"fmt"
-	"math"
 	"sync"
 	"sync/atomic"

@@ -273,6 +272,11 @@ func (b *Block) UnmarshalData() error {
 	if b.bh.PrecisionBits < 64 {
 		// Recover timestamps order after lossy compression.
 		encoding.EnsureNonDecreasingSequence(b.timestamps, b.bh.MinTimestamp, b.bh.MaxTimestamp)
+	} else {
+		// Ensure timestamps are in the range [MinTimestamp ... MaxTimestamp] and are ordered.
+		if err := checkTimestampsBounds(b.timestamps, b.bh.MinTimestamp, b.bh.MaxTimestamp); err != nil {
+			return err
+		}
 	}
 	b.timestampsData = b.timestampsData[:0]
@@ -291,6 +295,27 @@ func (b *Block) UnmarshalData() error {
 	return nil
 }

+func checkTimestampsBounds(timestamps []int64, minTimestamp, maxTimestamp int64) error {
+	if len(timestamps) == 0 {
+		return nil
+	}
+	tsPrev := timestamps[0]
+	if tsPrev < minTimestamp {
+		return fmt.Errorf("timestamp for the row 0 out of %d rows cannot be smaller than %d; got %d", len(timestamps), minTimestamp, tsPrev)
+	}
+	for i, ts := range timestamps[1:] {
+		if ts < tsPrev {
+			return fmt.Errorf("timestamp for the row %d cannot be smaller than the timestamp for the row %d (total %d rows); got %d vs %d",
+				i+1, i, len(timestamps), ts, tsPrev)
+		}
+		tsPrev = ts
+	}
+	if tsPrev > maxTimestamp {
+		return fmt.Errorf("timestamp for the row %d (the last one) cannot be bigger than %d; got %d", len(timestamps)-1, maxTimestamp, tsPrev)
+	}
+	return nil
+}
+
 // AppendRowsWithTimeRangeFilter filters samples from b according to tr and appends them to dst*.
 //
 // It is expected that UnmarshalData has been already called on b.
@@ -327,16 +352,9 @@ func (b *Block) filterTimestamps(tr TimeRange) ([]int64, []int64) {
 // The marshaled value must be unmarshaled with UnmarshalPortable function.
 func (b *Block) MarshalPortable(dst []byte) []byte {
 	b.MarshalData(0, 0)
-	dst = encoding.MarshalVarInt64(dst, b.bh.MinTimestamp)
-	dst = encoding.MarshalVarInt64(dst, b.bh.FirstValue)
-	dst = encoding.MarshalVarUint64(dst, uint64(b.bh.RowsCount))
-	dst = encoding.MarshalVarInt64(dst, int64(b.bh.Scale))
-	dst = append(dst, byte(b.bh.TimestampsMarshalType))
-	dst = append(dst, byte(b.bh.ValuesMarshalType))
+	dst = b.bh.marshalPortable(dst)
 	dst = encoding.MarshalBytes(dst, b.timestampsData)
 	dst = encoding.MarshalBytes(dst, b.valuesData)

 	return dst
 }

@@ -345,50 +363,10 @@ func (b *Block) MarshalPortable(dst []byte) []byte {
 // It is assumed that the block has been marshaled with MarshalPortable.
 func (b *Block) UnmarshalPortable(src []byte) ([]byte, error) {
 	b.Reset()
-	// Read header
-	src, firstTimestamp, err := encoding.UnmarshalVarInt64(src)
+	src, err := b.bh.unmarshalPortable(src)
 	if err != nil {
-		return src, fmt.Errorf("cannot unmarshal firstTimestamp: %w", err)
+		return src, err
 	}
-	b.bh.MinTimestamp = firstTimestamp
-	src, firstValue, err := encoding.UnmarshalVarInt64(src)
-	if err != nil {
-		return src, fmt.Errorf("cannot unmarshal firstValue: %w", err)
-	}
-	b.bh.FirstValue = firstValue
-	src, rowsCount, err := encoding.UnmarshalVarUint64(src)
-	if err != nil {
-		return src, fmt.Errorf("cannot unmarshal rowsCount: %w", err)
-	}
-	if rowsCount > math.MaxUint32 {
-		return src, fmt.Errorf("got too big rowsCount=%d; it mustn't exceed %d", rowsCount, uint32(math.MaxUint32))
-	}
-	b.bh.RowsCount = uint32(rowsCount)
-	src, scale, err := encoding.UnmarshalVarInt64(src)
-	if err != nil {
-		return src, fmt.Errorf("cannot unmarshal scale: %w", err)
-	}
-	if scale < math.MinInt16 {
-		return src, fmt.Errorf("got too small scale=%d; it mustn't be smaller than %d", scale, math.MinInt16)
-	}
-	if scale > math.MaxInt16 {
-		return src, fmt.Errorf("got too big scale=%d; it mustn't exceeed %d", scale, math.MaxInt16)
-	}
-	b.bh.Scale = int16(scale)
-	if len(src) < 1 {
-		return src, fmt.Errorf("cannot unmarshal marshalType for timestamps from %d bytes; need at least %d bytes", len(src), 1)
-	}
-	b.bh.TimestampsMarshalType = encoding.MarshalType(src[0])
-	src = src[1:]
-	if len(src) < 1 {
-		return src, fmt.Errorf("cannot unmarshal marshalType for values from %d bytes; need at least %d bytes", len(src), 1)
-	}
-	b.bh.ValuesMarshalType = encoding.MarshalType(src[0])
-	src = src[1:]
-	b.bh.PrecisionBits = 64
-
-	// Read data
 	src, timestampsData, err := encoding.UnmarshalBytes(src)
 	if err != nil {
 		return src, fmt.Errorf("cannot read timestampsData: %w", err)
@@ -400,7 +378,6 @@ func (b *Block) UnmarshalPortable(src []byte) ([]byte, error) {
 	}
 	b.valuesData = append(b.valuesData[:0], valuesData...)

-	// Validate
 	if err := b.bh.validate(); err != nil {
 		return src, fmt.Errorf("invalid blockHeader: %w", err)
 	}

@@ -2,6 +2,7 @@ package storage

 import (
 	"fmt"
+	"math"
 	"sort"

 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
@@ -154,6 +155,69 @@ func (bh *blockHeader) Unmarshal(src []byte) ([]byte, error) {
 	return src, err
 }

+func (bh *blockHeader) marshalPortable(dst []byte) []byte {
+	dst = encoding.MarshalVarInt64(dst, bh.MinTimestamp)
+	dst = encoding.MarshalVarInt64(dst, bh.MaxTimestamp)
+	dst = encoding.MarshalVarInt64(dst, bh.FirstValue)
+	dst = encoding.MarshalVarUint64(dst, uint64(bh.RowsCount))
+	dst = encoding.MarshalVarInt64(dst, int64(bh.Scale))
+	dst = append(dst, byte(bh.TimestampsMarshalType), byte(bh.ValuesMarshalType), byte(bh.PrecisionBits))
+	return dst
+}
+
+func (bh *blockHeader) unmarshalPortable(src []byte) ([]byte, error) {
+	src, minTimestamp, err := encoding.UnmarshalVarInt64(src)
+	if err != nil {
+		return src, fmt.Errorf("cannot unmarshal minTimestamp: %w", err)
+	}
+	bh.MinTimestamp = minTimestamp
+	src, maxTimestamp, err := encoding.UnmarshalVarInt64(src)
+	if err != nil {
+		return src, fmt.Errorf("cannot unmarshal maxTimestamp: %w", err)
+	}
+	bh.MaxTimestamp = maxTimestamp
+	src, firstValue, err := encoding.UnmarshalVarInt64(src)
+	if err != nil {
+		return src, fmt.Errorf("cannot unmarshal firstValue: %w", err)
+	}
+	bh.FirstValue = firstValue
+	src, rowsCount, err := encoding.UnmarshalVarUint64(src)
+	if err != nil {
+		return src, fmt.Errorf("cannot unmarshal rowsCount: %w", err)
+	}
+	if rowsCount > math.MaxUint32 {
+		return src, fmt.Errorf("got too big rowsCount=%d; it mustn't exceed %d", rowsCount, uint32(math.MaxUint32))
+	}
+	bh.RowsCount = uint32(rowsCount)
+	src, scale, err := encoding.UnmarshalVarInt64(src)
+	if err != nil {
+		return src, fmt.Errorf("cannot unmarshal scale: %w", err)
+	}
+	if scale < math.MinInt16 {
+		return src, fmt.Errorf("got too small scale=%d; it mustn't be smaller than %d", scale, math.MinInt16)
+	}
+	if scale > math.MaxInt16 {
+		return src, fmt.Errorf("got too big scale=%d; it mustn't exceed %d", scale, math.MaxInt16)
+	}
+	bh.Scale = int16(scale)
+	if len(src) < 1 {
+		return src, fmt.Errorf("cannot unmarshal marshalType for timestamps from %d bytes; need at least %d bytes", len(src), 1)
+	}
+	bh.TimestampsMarshalType = encoding.MarshalType(src[0])
+	src = src[1:]
+	if len(src) < 1 {
+		return src, fmt.Errorf("cannot unmarshal marshalType for values from %d bytes; need at least %d bytes", len(src), 1)
+	}
+	bh.ValuesMarshalType = encoding.MarshalType(src[0])
+	src = src[1:]
+	if len(src) < 1 {
+		return src, fmt.Errorf("cannot unmarshal precisionBits for values from %d bytes; need at least %d bytes", len(src), 1)
+	}
+	bh.PrecisionBits = uint8(src[0])
+	src = src[1:]
+	return src, nil
+}
+
 func (bh *blockHeader) validate() error {
 	if bh.RowsCount == 0 {
 		return fmt.Errorf("RowsCount in block header cannot be zero")
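To make the portable header layout introduced above easier to see at a glance, here is an editorial Go sketch of the same field order using the standard library's varint helpers. It is illustration only: it is not byte-compatible with VictoriaMetrics' own `encoding.MarshalVarInt64`/`MarshalVarUint64` routines, and `portableHeader` is a hypothetical type, not part of the codebase:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// portableHeader mirrors the field order used by blockHeader.marshalPortable above:
// MinTimestamp, MaxTimestamp, FirstValue, RowsCount, Scale, then three single bytes
// for the timestamps marshal type, the values marshal type and PrecisionBits.
type portableHeader struct {
	MinTimestamp, MaxTimestamp, FirstValue                  int64
	RowsCount                                               uint32
	Scale                                                   int16
	TimestampsMarshalType, ValuesMarshalType, PrecisionBits byte
}

// marshal sketches the layout with encoding/binary varints; it only illustrates
// the order of the fields, not VictoriaMetrics' actual byte encoding.
func (h *portableHeader) marshal(dst []byte) []byte {
	dst = binary.AppendVarint(dst, h.MinTimestamp)
	dst = binary.AppendVarint(dst, h.MaxTimestamp)
	dst = binary.AppendVarint(dst, h.FirstValue)
	dst = binary.AppendUvarint(dst, uint64(h.RowsCount))
	dst = binary.AppendVarint(dst, int64(h.Scale))
	return append(dst, h.TimestampsMarshalType, h.ValuesMarshalType, h.PrecisionBits)
}

func main() {
	h := portableHeader{MinTimestamp: 1000, MaxTimestamp: 2000, FirstValue: 42, RowsCount: 10, Scale: -3, PrecisionBits: 64}
	fmt.Printf("% x\n", h.marshal(nil))
}
```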
@@ -1,10 +1,13 @@
 package storage

 import (
+	"fmt"
 	"math/rand"
 	"reflect"
 	"strings"
 	"testing"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
 )

 func TestBlockMarshalUnmarshalPortable(t *testing.T) {
@@ -15,15 +18,15 @@ func TestBlockMarshalUnmarshalPortable(t *testing.T) {
 		b.timestamps = getRandTimestamps(rowsCount)
 		b.values = getRandValues(rowsCount)
 		b.bh.Scale = int16(rand.Intn(30) - 15)
-		b.bh.PrecisionBits = 64
+		b.bh.PrecisionBits = uint8(64 - (i % 64))
 		testBlockMarshalUnmarshalPortable(t, &b)
 	}
 }

 func testBlockMarshalUnmarshalPortable(t *testing.T, b *Block) {
 	var b1, b2 Block
-	b1.CopyFrom(b)
 	rowsCount := len(b.values)
+	b1.CopyFrom(b)
 	data := b1.MarshalPortable(nil)
 	if b1.bh.RowsCount != uint32(rowsCount) {
 		t.Fatalf("unexpected number of rows marshaled; got %d; want %d", b1.bh.RowsCount, rowsCount)
@@ -60,11 +63,14 @@ func testBlockMarshalUnmarshalPortable(t *testing.T, b *Block) {
 	compareBlocksPortable(t, &b2, b, &b1.bh)
 }

-func compareBlocksPortable(t *testing.T, b1, b2 *Block, bhExpected *blockHeader) {
+func compareBlocksPortable(t *testing.T, b1, bExpected *Block, bhExpected *blockHeader) {
 	t.Helper()
 	if b1.bh.MinTimestamp != bhExpected.MinTimestamp {
 		t.Fatalf("unexpected MinTimestamp; got %d; want %d", b1.bh.MinTimestamp, bhExpected.MinTimestamp)
 	}
+	if b1.bh.MaxTimestamp != bhExpected.MaxTimestamp {
+		t.Fatalf("unexpected MaxTimestamp; got %d; want %d", b1.bh.MaxTimestamp, bhExpected.MaxTimestamp)
+	}
 	if b1.bh.FirstValue != bhExpected.FirstValue {
 		t.Fatalf("unexpected FirstValue; got %d; want %d", b1.bh.FirstValue, bhExpected.FirstValue)
 	}
@@ -83,11 +89,15 @@ func compareBlocksPortable(t *testing.T, b1, b2 *Block, bhExpected *blockHeader)
 	if b1.bh.PrecisionBits != bhExpected.PrecisionBits {
 		t.Fatalf("unexpected PrecisionBits; got %d; want %d", b1.bh.PrecisionBits, bhExpected.PrecisionBits)
 	}
-	if !reflect.DeepEqual(b1.values, b2.values) {
-		t.Fatalf("unexpected values; got %d; want %d", b1.values, b2.values)
+
+	timestampsExpected := getTimestampsForPrecisionBits(bExpected.timestamps, bhExpected.PrecisionBits)
+	valuesExpected := getValuesForPrecisionBits(bExpected.values, bhExpected.PrecisionBits)
+
+	if !reflect.DeepEqual(b1.values, valuesExpected) {
+		t.Fatalf("unexpected values for precisionBits=%d; got\n%d\nwant\n%d", b1.bh.PrecisionBits, b1.values, valuesExpected)
 	}
-	if !reflect.DeepEqual(b1.timestamps, b2.timestamps) {
-		t.Fatalf("unexpected timestamps; got %d; want %d", b1.timestamps, b2.timestamps)
+	if !reflect.DeepEqual(b1.timestamps, timestampsExpected) {
+		t.Fatalf("unexpected timestamps for precisionBits=%d; got\n%d\nwant\n%d", b1.bh.PrecisionBits, b1.timestamps, timestampsExpected)
 	}
 	if len(b1.values) != int(bhExpected.RowsCount) {
 		t.Fatalf("unexpected number of values; got %d; want %d", len(b1.values), bhExpected.RowsCount)
@@ -97,6 +107,27 @@ func compareBlocksPortable(t *testing.T, b1, b2 *Block, bhExpected *blockHeader)
 	}
 }

+func getTimestampsForPrecisionBits(timestamps []int64, precisionBits uint8) []int64 {
+	data, marshalType, firstTimestamp := encoding.MarshalTimestamps(nil, timestamps, precisionBits)
+	timestampsAdjusted, err := encoding.UnmarshalTimestamps(nil, data, marshalType, firstTimestamp, len(timestamps))
+	if err != nil {
+		panic(fmt.Errorf("BUG: cannot unmarshal timestamps with precisionBits %d: %s", precisionBits, err))
+	}
+	minTimestamp := timestamps[0]
+	maxTimestamp := timestamps[len(timestamps)-1]
+	encoding.EnsureNonDecreasingSequence(timestampsAdjusted, minTimestamp, maxTimestamp)
+	return timestampsAdjusted
+}
+
+func getValuesForPrecisionBits(values []int64, precisionBits uint8) []int64 {
+	data, marshalType, firstValue := encoding.MarshalValues(nil, values, precisionBits)
+	valuesAdjusted, err := encoding.UnmarshalValues(nil, data, marshalType, firstValue, len(values))
+	if err != nil {
+		panic(fmt.Errorf("BUG: cannot unmarshal values with precisionBits %d: %s", precisionBits, err))
+	}
+	return valuesAdjusted
+}
+
 func getRandValues(rowsCount int) []int64 {
 	a := make([]int64, rowsCount)
 	for i := 0; i < rowsCount; i++ {
vendor/cloud.google.com/go/.gitignore (generated, vendored; 12 lines deleted)
@@ -1,12 +0,0 @@
-# Editors
-.idea
-.vscode
-*.swp
-.history
-
-# Test files
-*.test
-coverage.txt
-
-# Other
-.DS_Store
vendor/cloud.google.com/go/.release-please-manifest-submodules.json (generated, vendored; 111 lines deleted)
@@ -1,111 +0,0 @@
|
||||||
{
|
|
||||||
"accessapproval": "1.3.0",
|
|
||||||
"accesscontextmanager": "1.2.0",
|
|
||||||
"aiplatform": "1.17.0",
|
|
||||||
"analytics": "0.9.0",
|
|
||||||
"apigateway": "1.2.0",
|
|
||||||
"apigeeconnect": "1.2.0",
|
|
||||||
"apigeeregistry": "0.2.0",
|
|
||||||
"apikeys": "0.1.0",
|
|
||||||
"appengine": "1.3.0",
|
|
||||||
"area120": "0.4.0",
|
|
||||||
"artifactregistry": "1.4.0",
|
|
||||||
"asset": "1.4.0",
|
|
||||||
"assuredworkloads": "1.2.0",
|
|
||||||
"automl": "1.4.0",
|
|
||||||
"baremetalsolution": "0.2.0",
|
|
||||||
"batch": "0.1.0",
|
|
||||||
"beyondcorp": "0.1.0",
|
|
||||||
"billing": "1.2.0",
|
|
||||||
"binaryauthorization": "1.0.0",
|
|
||||||
"certificatemanager": "0.2.1",
|
|
||||||
"channel": "1.7.0",
|
|
||||||
"cloudbuild": "1.2.0",
|
|
||||||
"clouddms": "1.2.0",
|
|
||||||
"cloudtasks": "1.4.0",
|
|
||||||
"compute": "1.9.0",
|
|
||||||
"contactcenterinsights": "1.2.3",
|
|
||||||
"container": "1.3.1",
|
|
||||||
"containeranalysis": "0.4.0",
|
|
||||||
"datacatalog": "1.3.1",
|
|
||||||
"dataflow": "0.5.1",
|
|
||||||
"dataform": "0.2.0",
|
|
||||||
"datafusion": "1.3.0",
|
|
||||||
"datalabeling": "0.3.0",
|
|
||||||
"dataplex": "1.1.0",
|
|
||||||
"dataproc": "1.5.0",
|
|
||||||
"dataqna": "0.4.0",
|
|
||||||
"datastream": "1.0.0",
|
|
||||||
"deploy": "1.2.1",
|
|
||||||
"dialogflow": "1.12.1",
|
|
||||||
"dlp": "1.4.0",
|
|
||||||
"documentai": "1.5.0",
|
|
||||||
"domains": "0.5.0",
|
|
||||||
"essentialcontacts": "1.2.0",
|
|
||||||
"eventarc": "1.6.0",
|
|
||||||
"filestore": "1.2.0",
|
|
||||||
"functions": "1.5.0",
|
|
||||||
"gaming": "1.3.1",
|
|
||||||
"gkebackup": "0.1.0",
|
|
||||||
"gkeconnect": "0.3.0",
|
|
||||||
"gkehub": "0.8.0",
|
|
||||||
"gkemulticloud": "0.2.0",
|
|
||||||
"grafeas": "0.2.0",
|
|
||||||
"gsuiteaddons": "1.2.0",
|
|
||||||
"iam": "0.3.0",
|
|
||||||
"iap": "1.3.0",
|
|
||||||
"ids": "1.0.0",
|
|
||||||
"iot": "1.2.0",
|
|
||||||
"kms": "1.4.0",
|
|
||||||
"language": "1.3.0",
|
|
||||||
"lifesciences": "0.4.0",
|
|
||||||
"managedidentities": "1.2.0",
|
|
||||||
"mediatranslation": "0.3.0",
|
|
||||||
"memcache": "1.3.0",
|
|
||||||
"metastore": "1.3.0",
|
|
||||||
"monitoring": "1.6.0",
|
|
||||||
"networkconnectivity": "1.2.0",
|
|
||||||
"networkmanagement": "1.3.0",
|
|
||||||
"networksecurity": "0.3.1",
|
|
||||||
"notebooks": "1.0.0",
|
|
||||||
"optimization": "1.0.0",
|
|
||||||
"orchestration": "1.2.0",
|
|
||||||
"orgpolicy": "1.3.0",
|
|
||||||
"osconfig": "1.6.0",
|
|
||||||
"oslogin": "1.3.0",
|
|
||||||
"phishingprotection": "0.4.0",
|
|
||||||
"policytroubleshooter": "1.2.0",
|
|
||||||
"privatecatalog": "0.4.0",
|
|
||||||
"recaptchaenterprise/v2": "2.0.1",
|
|
||||||
"recommendationengine": "0.3.0",
|
|
||||||
"recommender": "1.4.0",
|
|
||||||
"redis": "1.6.0",
|
|
||||||
"resourcemanager": "1.2.0",
|
|
||||||
"resourcesettings": "1.2.0",
|
|
||||||
"retail": "1.5.0",
|
|
||||||
"run": "0.1.1",
|
|
||||||
"scheduler": "1.3.0",
|
|
||||||
"secretmanager": "1.5.0",
|
|
||||||
"security": "1.4.1",
|
|
||||||
"securitycenter": "1.10.0",
|
|
||||||
"servicecontrol": "1.3.0",
|
|
||||||
"servicedirectory": "1.3.0",
|
|
||||||
"servicemanagement": "1.3.1",
|
|
||||||
"serviceusage": "1.2.0",
|
|
||||||
"shell": "1.2.0",
|
|
||||||
"speech": "1.5.0",
|
|
||||||
"storagetransfer": "1.3.0",
|
|
||||||
"talent": "1.0.0",
|
|
||||||
"texttospeech": "1.3.0",
|
|
||||||
"tpu": "1.2.0",
|
|
||||||
"trace": "1.2.0",
|
|
||||||
"translate": "1.2.0",
|
|
||||||
"video": "1.7.0",
|
|
||||||
"videointelligence": "1.4.0",
|
|
||||||
"vision/v2": "2.1.0",
|
|
||||||
"vmmigration": "1.1.0",
|
|
||||||
"vpcaccess": "1.2.0",
|
|
||||||
"webrisk": "1.3.0",
|
|
||||||
"websecurityscanner": "1.2.0",
|
|
||||||
"workflows": "1.5.0"
|
|
||||||
}
|
|
vendor/cloud.google.com/go/.release-please-manifest.json (generated, vendored; 3 lines deleted)
@@ -1,3 +0,0 @@
-{
-  ".": "0.104.0"
-}
vendor/cloud.google.com/go/CHANGES.md (generated, vendored; 2424 changed lines)
File diff suppressed because it is too large.
vendor/cloud.google.com/go/CODE_OF_CONDUCT.md (generated, vendored; 44 lines deleted)
@@ -1,44 +0,0 @@
|
||||||
# Contributor Code of Conduct
|
|
||||||
|
|
||||||
As contributors and maintainers of this project,
|
|
||||||
and in the interest of fostering an open and welcoming community,
|
|
||||||
we pledge to respect all people who contribute through reporting issues,
|
|
||||||
posting feature requests, updating documentation,
|
|
||||||
submitting pull requests or patches, and other activities.
|
|
||||||
|
|
||||||
We are committed to making participation in this project
|
|
||||||
a harassment-free experience for everyone,
|
|
||||||
regardless of level of experience, gender, gender identity and expression,
|
|
||||||
sexual orientation, disability, personal appearance,
|
|
||||||
body size, race, ethnicity, age, religion, or nationality.
|
|
||||||
|
|
||||||
Examples of unacceptable behavior by participants include:
|
|
||||||
|
|
||||||
* The use of sexualized language or imagery
|
|
||||||
* Personal attacks
|
|
||||||
* Trolling or insulting/derogatory comments
|
|
||||||
* Public or private harassment
|
|
||||||
* Publishing other's private information,
|
|
||||||
such as physical or electronic
|
|
||||||
addresses, without explicit permission
|
|
||||||
* Other unethical or unprofessional conduct.
|
|
||||||
|
|
||||||
Project maintainers have the right and responsibility to remove, edit, or reject
|
|
||||||
comments, commits, code, wiki edits, issues, and other contributions
|
|
||||||
that are not aligned to this Code of Conduct.
|
|
||||||
By adopting this Code of Conduct,
|
|
||||||
project maintainers commit themselves to fairly and consistently
|
|
||||||
applying these principles to every aspect of managing this project.
|
|
||||||
Project maintainers who do not follow or enforce the Code of Conduct
|
|
||||||
may be permanently removed from the project team.
|
|
||||||
|
|
||||||
This code of conduct applies both within project spaces and in public spaces
|
|
||||||
when an individual is representing the project or its community.
|
|
||||||
|
|
||||||
Instances of abusive, harassing, or otherwise unacceptable behavior
|
|
||||||
may be reported by opening an issue
|
|
||||||
or contacting one or more of the project maintainers.
|
|
||||||
|
|
||||||
This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
|
|
||||||
available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
|
|
||||||
|
|
vendor/cloud.google.com/go/CONTRIBUTING.md (generated, vendored; 327 lines deleted)
@@ -1,327 +0,0 @@
|
||||||
# Contributing
|
|
||||||
|
|
||||||
1. [File an issue](https://github.com/googleapis/google-cloud-go/issues/new/choose).
|
|
||||||
The issue will be used to discuss the bug or feature and should be created
|
|
||||||
before sending a PR.
|
|
||||||
|
|
||||||
1. [Install Go](https://golang.org/dl/).
|
|
||||||
1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`)
|
|
||||||
is in your `PATH`.
|
|
||||||
1. Check it's working by running `go version`.
|
|
||||||
* If it doesn't work, check the install location, usually
|
|
||||||
`/usr/local/go`, is on your `PATH`.
|
|
||||||
|
|
||||||
1. Sign one of the
|
|
||||||
[contributor license agreements](#contributor-license-agreements) below.
|
|
||||||
|
|
||||||
1. Clone the repo:
|
|
||||||
`git clone https://github.com/googleapis/google-cloud-go`
|
|
||||||
|
|
||||||
1. Change into the checked out source:
|
|
||||||
`cd google-cloud-go`
|
|
||||||
|
|
||||||
1. Fork the repo.
|
|
||||||
|
|
||||||
1. Set your fork as a remote:
|
|
||||||
`git remote add fork git@github.com:GITHUB_USERNAME/google-cloud-go.git`
|
|
||||||
|
|
||||||
1. Make changes, commit to your fork.
|
|
||||||
|
|
||||||
Commit messages should follow the
|
|
||||||
[Conventional Commits Style](https://www.conventionalcommits.org). The scope
|
|
||||||
portion should always be filled with the name of the package affected by the
|
|
||||||
changes being made. For example:
|
|
||||||
```
|
|
||||||
feat(functions): add gophers codelab
|
|
||||||
```
|
|
||||||
|
|
||||||
1. Send a pull request with your changes.
|
|
||||||
|
|
||||||
To minimize friction, consider setting `Allow edits from maintainers` on the
|
|
||||||
PR, which will enable project committers and automation to update your PR.
|
|
||||||
|
|
||||||
1. A maintainer will review the pull request and make comments.
|
|
||||||
|
|
||||||
Prefer adding additional commits over amending and force-pushing since it can
|
|
||||||
be difficult to follow code reviews when the commit history changes.
|
|
||||||
|
|
||||||
Commits will be squashed when they're merged.
|
|
||||||
|
|
||||||
## Testing
|
|
||||||
|
|
||||||
We test code against two versions of Go, the minimum and maximum versions
|
|
||||||
supported by our clients. To see which versions these are checkout our
|
|
||||||
[README](README.md#supported-versions).
|
|
||||||
|
|
||||||
### Integration Tests
|
|
||||||
|
|
||||||
In addition to the unit tests, you may run the integration test suite. These
|
|
||||||
directions describe setting up your environment to run integration tests for
|
|
||||||
_all_ packages: note that many of these instructions may be redundant if you
|
|
||||||
intend only to run integration tests on a single package.
|
|
||||||
|
|
||||||
#### GCP Setup
|
|
||||||
|
|
||||||
To run the integrations tests, creation and configuration of two projects in
|
|
||||||
the Google Developers Console is required: one specifically for Firestore
|
|
||||||
integration tests, and another for all other integration tests. We'll refer to
|
|
||||||
these projects as "general project" and "Firestore project".
|
|
||||||
|
|
||||||
After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount)
|
|
||||||
for each project. Ensure the project-level **Owner**
|
|
||||||
[IAM role](https://console.cloud.google.com/iam-admin/iam/project) role is added to
|
|
||||||
each service account. During the creation of the service account, you should
|
|
||||||
download the JSON credential file for use later.
|
|
||||||
|
|
||||||
Next, ensure the following APIs are enabled in the general project:
|
|
||||||
|
|
||||||
- BigQuery API
|
|
||||||
- BigQuery Data Transfer API
|
|
||||||
- Cloud Dataproc API
|
|
||||||
- Cloud Dataproc Control API Private
|
|
||||||
- Cloud Datastore API
|
|
||||||
- Cloud Firestore API
|
|
||||||
- Cloud Key Management Service (KMS) API
|
|
||||||
- Cloud Natural Language API
|
|
||||||
- Cloud OS Login API
|
|
||||||
- Cloud Pub/Sub API
|
|
||||||
- Cloud Resource Manager API
|
|
||||||
- Cloud Spanner API
|
|
||||||
- Cloud Speech API
|
|
||||||
- Cloud Translation API
|
|
||||||
- Cloud Video Intelligence API
|
|
||||||
- Cloud Vision API
|
|
||||||
- Compute Engine API
|
|
||||||
- Compute Engine Instance Group Manager API
|
|
||||||
- Container Registry API
|
|
||||||
- Firebase Rules API
|
|
||||||
- Google Cloud APIs
|
|
||||||
- Google Cloud Deployment Manager V2 API
|
|
||||||
- Google Cloud SQL
|
|
||||||
- Google Cloud Storage
|
|
||||||
- Google Cloud Storage JSON API
|
|
||||||
- Google Compute Engine Instance Group Updater API
|
|
||||||
- Google Compute Engine Instance Groups API
|
|
||||||
- Kubernetes Engine API
|
|
||||||
- Cloud Error Reporting API
|
|
||||||
- Pub/Sub Lite API
|
|
||||||
|
|
||||||
Next, create a Datastore database in the general project, and a Firestore
|
|
||||||
database in the Firestore project.
|
|
||||||
|
|
||||||
Finally, in the general project, create an API key for the translate API:
|
|
||||||
|
|
||||||
- Go to GCP Developer Console.
|
|
||||||
- Navigate to APIs & Services > Credentials.
|
|
||||||
- Click Create Credentials > API Key.
|
|
||||||
- Save this key for use in `GCLOUD_TESTS_API_KEY` as described below.
|
|
||||||
|
|
||||||
#### Local Setup
|
|
||||||
|
|
||||||
Once the two projects are created and configured, set the following environment
|
|
||||||
variables:
|
|
||||||
|
|
||||||
- `GCLOUD_TESTS_GOLANG_PROJECT_ID`: Developers Console project's ID (e.g.
|
|
||||||
bamboo-shift-455) for the general project.
|
|
||||||
- `GCLOUD_TESTS_GOLANG_KEY`: The path to the JSON key file of the general
|
|
||||||
project's service account.
|
|
||||||
- `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID
|
|
||||||
(e.g. doorway-cliff-677) for the Firestore project.
|
|
||||||
- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the
|
|
||||||
Firestore project's service account.
|
|
||||||
- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API created above.
|
|
||||||
|
|
||||||
As part of the setup that follows, the following variables will be configured:
|
|
||||||
|
|
||||||
- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests,
|
|
||||||
in the form
|
|
||||||
"projects/P/locations/L/keyRings/R". The creation of this is described below.
|
|
||||||
- `GCLOUD_TESTS_BIGTABLE_KEYRING`: The full name of the keyring for the bigtable tests,
|
|
||||||
in the form
|
|
||||||
"projects/P/locations/L/keyRings/R". The creation of this is described below. Expected to be single region.
|
|
||||||
- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone.
|
|
||||||
|
|
||||||
Install the [gcloud command-line tool][gcloudcli] to your machine and use it to
|
|
||||||
create some resources used in integration tests.
|
|
||||||
|
|
||||||
From the project's root directory:
|
|
||||||
|
|
||||||
``` sh
|
|
||||||
# Sets the default project in your env.
|
|
||||||
$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
|
|
||||||
|
|
||||||
# Authenticates the gcloud tool with your account.
|
|
||||||
$ gcloud auth login
|
|
||||||
|
|
||||||
# Create the indexes used in the datastore integration tests.
|
|
||||||
$ gcloud datastore indexes create datastore/testdata/index.yaml
|
|
||||||
|
|
||||||
# Creates a Google Cloud storage bucket with the same name as your test project,
|
|
||||||
# and with the Cloud Logging service account as owner, for the sink
|
|
||||||
# integration tests in logging.
|
|
||||||
$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
|
|
||||||
$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
|
|
||||||
|
|
||||||
# Creates a PubSub topic for integration tests of storage notifications.
|
|
||||||
$ gcloud beta pubsub topics create go-storage-notification-test
|
|
||||||
# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user
|
|
||||||
# "service-<numeric project id>@gs-project-accounts.iam.gserviceaccount.com"
|
|
||||||
# as a publisher to that topic.
|
|
||||||
|
|
||||||
# Creates a Spanner instance for the spanner integration tests.
|
|
||||||
$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 10 --description 'Instance for go client test'
|
|
||||||
# NOTE: Spanner instances are priced by the node-hour, so you may want to
|
|
||||||
# delete the instance after testing with 'gcloud beta spanner instances delete'.
|
|
||||||
|
|
||||||
$ export MY_KEYRING=some-keyring-name
|
|
||||||
$ export MY_LOCATION=global
|
|
||||||
$ export MY_SINGLE_LOCATION=us-central1
|
|
||||||
# Creates a KMS keyring, in the same location as the default location for your
|
|
||||||
# project's buckets.
|
|
||||||
$ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION
|
|
||||||
# Creates two keys in the keyring, named key1 and key2.
|
|
||||||
$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
|
|
||||||
$ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
|
|
||||||
# Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable.
|
|
||||||
$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
|
|
||||||
# Authorizes Google Cloud Storage to encrypt and decrypt using key1.
|
|
||||||
$ gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
|
|
||||||
|
|
||||||
# Create KMS Key in one region for Bigtable
|
|
||||||
$ gcloud kms keyrings create $MY_KEYRING --location $MY_SINGLE_LOCATION
|
|
||||||
$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_SINGLE_LOCATION --purpose encryption
|
|
||||||
# Sets the GCLOUD_TESTS_BIGTABLE_KEYRING environment variable.
|
|
||||||
$ export GCLOUD_TESTS_BIGTABLE_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_SINGLE_LOCATION/keyRings/$MY_KEYRING
|
|
||||||
# Create a service agent, https://cloud.google.com/bigtable/docs/use-cmek#gcloud:
|
|
||||||
$ gcloud beta services identity create \
|
|
||||||
--service=bigtableadmin.googleapis.com \
|
|
||||||
--project $GCLOUD_TESTS_GOLANG_PROJECT_ID
|
|
||||||
# Note the service agent email for the agent created.
|
|
||||||
$ export SERVICE_AGENT_EMAIL=<service agent email, from last step>
|
|
||||||
|
|
||||||
# Authorizes Google Cloud Bigtable to encrypt and decrypt using key1
|
|
||||||
$ gcloud kms keys add-iam-policy-binding key1 \
|
|
||||||
--keyring $MY_KEYRING \
|
|
||||||
--location $MY_SINGLE_LOCATION \
|
|
||||||
--role roles/cloudkms.cryptoKeyEncrypterDecrypter \
|
|
||||||
--member "serviceAccount:$SERVICE_AGENT_EMAIL" \
|
|
||||||
--project $GCLOUD_TESTS_GOLANG_PROJECT_ID
|
|
||||||
```
|
|
||||||
|
|
||||||
It may be useful to add exports to your shell initialization for future use.
|
|
||||||
For instance, in `.zshrc`:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
#### START GO SDK Test Variables
|
|
||||||
# Developers Console project's ID (e.g. bamboo-shift-455) for the general project.
|
|
||||||
export GCLOUD_TESTS_GOLANG_PROJECT_ID=your-project
|
|
||||||
|
|
||||||
# The path to the JSON key file of the general project's service account.
|
|
||||||
export GCLOUD_TESTS_GOLANG_KEY=~/directory/your-project-abcd1234.json
|
|
||||||
|
|
||||||
# Developers Console project's ID (e.g. doorway-cliff-677) for the Firestore project.
|
|
||||||
export GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID=your-firestore-project
|
|
||||||
|
|
||||||
# The path to the JSON key file of the Firestore project's service account.
|
|
||||||
export GCLOUD_TESTS_GOLANG_FIRESTORE_KEY=~/directory/your-firestore-project-abcd1234.json
|
|
||||||
|
|
||||||
# The full name of the keyring for the tests, in the form "projects/P/locations/L/keyRings/R".
|
|
||||||
# The creation of this is described above.
|
|
||||||
export MY_KEYRING=my-golang-sdk-test
|
|
||||||
export MY_LOCATION=global
|
|
||||||
export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
|
|
||||||
|
|
||||||
# API key for using the Translate API.
|
|
||||||
export GCLOUD_TESTS_API_KEY=abcdefghijk123456789
|
|
||||||
|
|
||||||
# Compute Engine zone. (https://cloud.google.com/compute/docs/regions-zones)
|
|
||||||
export GCLOUD_TESTS_GOLANG_ZONE=your-chosen-region
|
|
||||||
#### END GO SDK Test Variables
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Running
|
|
||||||
|
|
||||||
Once you've done the necessary setup, you can run the integration tests by
|
|
||||||
running:
|
|
||||||
|
|
||||||
``` sh
|
|
||||||
$ go test -v ./...
|
|
||||||
```
|
|
||||||
|
|
||||||
Note that the above command will not run the tests in other modules. To run
|
|
||||||
tests on other modules, first navigate to the appropriate
|
|
||||||
subdirectory. For instance, to run only the tests for datastore:
|
|
||||||
``` sh
|
|
||||||
$ cd datastore
|
|
||||||
$ go test -v ./...
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Replay
|
|
||||||
|
|
||||||
Some packages can record the RPCs during integration tests to a file for
|
|
||||||
subsequent replay. To record, pass the `-record` flag to `go test`. The
|
|
||||||
recording will be saved to the _package_`.replay` file. To replay integration
|
|
||||||
tests from a saved recording, the replay file must be present, the `-short`
|
|
||||||
flag must be passed to `go test`, and the `GCLOUD_TESTS_GOLANG_ENABLE_REPLAY`
|
|
||||||
environment variable must have a non-empty value.
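For example, from a package that supports recording (a sketch; run it with the
integration-test environment configured as described above):

``` sh
# Record the RPCs made by the integration tests to the package's .replay file.
$ go test -v -record

# Later, replay the saved recording instead of calling the real services.
$ GCLOUD_TESTS_GOLANG_ENABLE_REPLAY=1 go test -v -short
```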
|
|
||||||
|
|
||||||
## Contributor License Agreements
|
|
||||||
|
|
||||||
Before we can accept your pull requests you'll need to sign a Contributor
|
|
||||||
License Agreement (CLA):
|
|
||||||
|
|
||||||
- **If you are an individual writing original source code** and **you own the
|
|
||||||
intellectual property**, then you'll need to sign an [individual CLA][indvcla].
|
|
||||||
- **If you work for a company that wants to allow you to contribute your
|
|
||||||
work**, then you'll need to sign a [corporate CLA][corpcla].
|
|
||||||
|
|
||||||
You can sign these electronically (just scroll to the bottom). After that,
|
|
||||||
we'll be able to accept your pull requests.
|
|
||||||
|
|
||||||
## Contributor Code of Conduct
|
|
||||||
|
|
||||||
As contributors and maintainers of this project,
|
|
||||||
and in the interest of fostering an open and welcoming community,
|
|
||||||
we pledge to respect all people who contribute through reporting issues,
|
|
||||||
posting feature requests, updating documentation,
|
|
||||||
submitting pull requests or patches, and other activities.
|
|
||||||
|
|
||||||
We are committed to making participation in this project
|
|
||||||
a harassment-free experience for everyone,
|
|
||||||
regardless of level of experience, gender, gender identity and expression,
|
|
||||||
sexual orientation, disability, personal appearance,
|
|
||||||
body size, race, ethnicity, age, religion, or nationality.
|
|
||||||
|
|
||||||
Examples of unacceptable behavior by participants include:
|
|
||||||
|
|
||||||
* The use of sexualized language or imagery
|
|
||||||
* Personal attacks
|
|
||||||
* Trolling or insulting/derogatory comments
|
|
||||||
* Public or private harassment
|
|
||||||
* Publishing others' private information,
|
|
||||||
such as physical or electronic
|
|
||||||
addresses, without explicit permission
|
|
||||||
* Other unethical or unprofessional conduct.
|
|
||||||
|
|
||||||
Project maintainers have the right and responsibility to remove, edit, or reject
|
|
||||||
comments, commits, code, wiki edits, issues, and other contributions
|
|
||||||
that are not aligned to this Code of Conduct.
|
|
||||||
By adopting this Code of Conduct,
|
|
||||||
project maintainers commit themselves to fairly and consistently
|
|
||||||
applying these principles to every aspect of managing this project.
|
|
||||||
Project maintainers who do not follow or enforce the Code of Conduct
|
|
||||||
may be permanently removed from the project team.
|
|
||||||
|
|
||||||
This code of conduct applies both within project spaces and in public spaces
|
|
||||||
when an individual is representing the project or its community.
|
|
||||||
|
|
||||||
Instances of abusive, harassing, or otherwise unacceptable behavior
|
|
||||||
may be reported by opening an issue
|
|
||||||
or contacting one or more of the project maintainers.
|
|
||||||
|
|
||||||
This Code of Conduct is adapted from the [Contributor Covenant](https://contributor-covenant.org), version 1.2.0,
|
|
||||||
available at [https://contributor-covenant.org/version/1/2/0/](https://contributor-covenant.org/version/1/2/0/)
|
|
||||||
|
|
||||||
[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/
|
|
||||||
[indvcla]: https://developers.google.com/open-source/cla/individual
|
|
||||||
[corpcla]: https://developers.google.com/open-source/cla/corporate
|
|
139 vendor/cloud.google.com/go/README.md (generated, vendored)
@@ -1,139 +0,0 @@
|
||||||
# Google Cloud Client Libraries for Go
|
|
||||||
|
|
||||||
[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go.svg)](https://pkg.go.dev/cloud.google.com/go)
|
|
||||||
|
|
||||||
Go packages for [Google Cloud Platform](https://cloud.google.com) services.
|
|
||||||
|
|
||||||
``` go
|
|
||||||
import "cloud.google.com/go"
|
|
||||||
```
|
|
||||||
|
|
||||||
To install the packages on your system, *do not clone the repo*. Instead:
|
|
||||||
|
|
||||||
1. Change to your project directory:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
cd /my/cloud/project
|
|
||||||
```
|
|
||||||
1. Get the package you want to use. Some products have their own module, so it's
|
|
||||||
best to `go get` the package(s) you want to use:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ go get cloud.google.com/go/firestore # Replace with the package you want to use.
|
|
||||||
```
|
|
||||||
|
|
||||||
**NOTE:** Some of these packages are under development, and may occasionally
|
|
||||||
make backwards-incompatible changes.
|
|
||||||
|
|
||||||
## Supported APIs
|
|
||||||
|
|
||||||
For an updated list of all of our released APIs please see our
|
|
||||||
[reference docs](https://cloud.google.com/go/docs/reference).
|
|
||||||
|
|
||||||
## [Go Versions Supported](#supported-versions)
|
|
||||||
|
|
||||||
Our libraries are compatible with at least the three most recent major Go
|
|
||||||
releases. They are currently compatible with:
|
|
||||||
|
|
||||||
- Go 1.19
|
|
||||||
- Go 1.18
|
|
||||||
- Go 1.17
|
|
||||||
- Go 1.16
|
|
||||||
- Go 1.15
|
|
||||||
|
|
||||||
## Authorization
|
|
||||||
|
|
||||||
By default, each API will use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials)
|
|
||||||
for authorization credentials used in calling the API endpoints. This will allow your
|
|
||||||
application to run in many environments without requiring explicit configuration.
|
|
||||||
|
|
||||||
[snip]:# (auth)
|
|
||||||
```go
|
|
||||||
client, err := storage.NewClient(ctx)
|
|
||||||
```
|
|
||||||
|
|
||||||
To authorize using a
|
|
||||||
[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys),
|
|
||||||
pass
|
|
||||||
[`option.WithCredentialsFile`](https://pkg.go.dev/google.golang.org/api/option#WithCredentialsFile)
|
|
||||||
to the `NewClient` function of the desired package. For example:
|
|
||||||
|
|
||||||
[snip]:# (auth-JSON)
|
|
||||||
```go
|
|
||||||
client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json"))
|
|
||||||
```
|
|
||||||
|
|
||||||
You can exert more control over authorization by using the
|
|
||||||
[`golang.org/x/oauth2`](https://pkg.go.dev/golang.org/x/oauth2) package to
|
|
||||||
create an `oauth2.TokenSource`. Then pass
|
|
||||||
[`option.WithTokenSource`](https://pkg.go.dev/google.golang.org/api/option#WithTokenSource)
|
|
||||||
to the `NewClient` function:
|
|
||||||
[snip]:# (auth-ts)
|
|
||||||
```go
|
|
||||||
tokenSource := ...
|
|
||||||
client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
|
|
||||||
```
|
|
||||||
|
|
||||||
## Contributing
|
|
||||||
|
|
||||||
Contributions are welcome. Please see the
|
|
||||||
[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
|
|
||||||
document for details.
|
|
||||||
|
|
||||||
Please note that this project is released with a Contributor Code of Conduct.
|
|
||||||
By participating in this project you agree to abide by its terms.
|
|
||||||
See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
|
|
||||||
for more information.
|
|
||||||
|
|
||||||
[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory
|
|
||||||
[cloud-automl]: https://cloud.google.com/automl
|
|
||||||
[cloud-build]: https://cloud.google.com/cloud-build/
|
|
||||||
[cloud-bigquery]: https://cloud.google.com/bigquery/
|
|
||||||
[cloud-bigtable]: https://cloud.google.com/bigtable/
|
|
||||||
[cloud-compute]: https://cloud.google.com/compute
|
|
||||||
[cloud-container]: https://cloud.google.com/containers/
|
|
||||||
[cloud-containeranalysis]: https://cloud.google.com/container-registry/docs/container-analysis
|
|
||||||
[cloud-dataproc]: https://cloud.google.com/dataproc/
|
|
||||||
[cloud-datastore]: https://cloud.google.com/datastore/
|
|
||||||
[cloud-dialogflow]: https://cloud.google.com/dialogflow-enterprise/
|
|
||||||
[cloud-debugger]: https://cloud.google.com/debugger/
|
|
||||||
[cloud-dlp]: https://cloud.google.com/dlp/
|
|
||||||
[cloud-errors]: https://cloud.google.com/error-reporting/
|
|
||||||
[cloud-firestore]: https://cloud.google.com/firestore/
|
|
||||||
[cloud-iam]: https://cloud.google.com/iam/
|
|
||||||
[cloud-iot]: https://cloud.google.com/iot-core/
|
|
||||||
[cloud-irm]: https://cloud.google.com/incident-response/docs/concepts
|
|
||||||
[cloud-kms]: https://cloud.google.com/kms/
|
|
||||||
[cloud-pubsub]: https://cloud.google.com/pubsub/
|
|
||||||
[cloud-pubsublite]: https://cloud.google.com/pubsub/lite
|
|
||||||
[cloud-storage]: https://cloud.google.com/storage/
|
|
||||||
[cloud-language]: https://cloud.google.com/natural-language
|
|
||||||
[cloud-logging]: https://cloud.google.com/logging/
|
|
||||||
[cloud-natural-language]: https://cloud.google.com/natural-language/
|
|
||||||
[cloud-memorystore]: https://cloud.google.com/memorystore/
|
|
||||||
[cloud-monitoring]: https://cloud.google.com/monitoring/
|
|
||||||
[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest
|
|
||||||
[cloud-phishingprotection]: https://cloud.google.com/phishing-protection/
|
|
||||||
[cloud-securitycenter]: https://cloud.google.com/security-command-center/
|
|
||||||
[cloud-scheduler]: https://cloud.google.com/scheduler
|
|
||||||
[cloud-spanner]: https://cloud.google.com/spanner/
|
|
||||||
[cloud-speech]: https://cloud.google.com/speech
|
|
||||||
[cloud-talent]: https://cloud.google.com/solutions/talent-solution/
|
|
||||||
[cloud-tasks]: https://cloud.google.com/tasks/
|
|
||||||
[cloud-texttospeech]: https://cloud.google.com/texttospeech/
|
|
||||||
[cloud-talent]: https://cloud.google.com/solutions/talent-solution/
|
|
||||||
[cloud-trace]: https://cloud.google.com/trace/
|
|
||||||
[cloud-translate]: https://cloud.google.com/translate
|
|
||||||
[cloud-recaptcha]: https://cloud.google.com/recaptcha-enterprise/
|
|
||||||
[cloud-recommender]: https://cloud.google.com/recommendations/
|
|
||||||
[cloud-video]: https://cloud.google.com/video-intelligence/
|
|
||||||
[cloud-vision]: https://cloud.google.com/vision
|
|
||||||
[cloud-webrisk]: https://cloud.google.com/web-risk/
|
|
||||||
|
|
||||||
## Links
|
|
||||||
|
|
||||||
- [Go on Google Cloud](https://cloud.google.com/go/home)
|
|
||||||
- [Getting started with Go on Google Cloud](https://cloud.google.com/go/getting-started)
|
|
||||||
- [App Engine Quickstart](https://cloud.google.com/appengine/docs/standard/go/quickstart)
|
|
||||||
- [Cloud Functions Quickstart](https://cloud.google.com/functions/docs/quickstart-go)
|
|
||||||
- [Cloud Run Quickstart](https://cloud.google.com/run/docs/quickstarts/build-and-deploy#go)
|
|
141 vendor/cloud.google.com/go/RELEASING.md (generated, vendored)
@@ -1,141 +0,0 @@
|
||||||
# Releasing
|
|
||||||
|
|
||||||
## Determine which module to release
|
|
||||||
|
|
||||||
The Go client libraries have several modules. A module does not strictly
|
|
||||||
correspond to a single library - it corresponds to a tree of directories. If a
|
|
||||||
file needs to be released, you must release the closest ancestor module.
|
|
||||||
|
|
||||||
To see all modules:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ cat `find . -name go.mod` | grep module
|
|
||||||
module cloud.google.com/go/pubsub
|
|
||||||
module cloud.google.com/go/spanner
|
|
||||||
module cloud.google.com/go
|
|
||||||
module cloud.google.com/go/bigtable
|
|
||||||
module cloud.google.com/go/bigquery
|
|
||||||
module cloud.google.com/go/storage
|
|
||||||
module cloud.google.com/go/pubsublite
|
|
||||||
module cloud.google.com/go/firestore
|
|
||||||
module cloud.google.com/go/logging
|
|
||||||
module cloud.google.com/go/internal/gapicgen
|
|
||||||
module cloud.google.com/go/internal/godocfx
|
|
||||||
module cloud.google.com/go/internal/examples/fake
|
|
||||||
module cloud.google.com/go/internal/examples/mock
|
|
||||||
module cloud.google.com/go/datastore
|
|
||||||
```
|
|
||||||
|
|
||||||
`cloud.google.com/go` is the repository root module. Each other module is
|
|
||||||
a submodule.
|
|
||||||
|
|
||||||
So, if you need to release a change in `bigtable/bttest/inmem.go`, the closest
|
|
||||||
ancestor module is `cloud.google.com/go/bigtable` - so you should release a new
|
|
||||||
version of the `cloud.google.com/go/bigtable` submodule.
|
|
||||||
|
|
||||||
If you need to release a change in `asset/apiv1/asset_client.go`, the closest
|
|
||||||
ancestor module is `cloud.google.com/go` - so you should release a new version
|
|
||||||
of the `cloud.google.com/go` repository root module. Note: releasing
|
|
||||||
`cloud.google.com/go` has no impact on any of the submodules, and vice-versa.
|
|
||||||
They are released entirely independently.
|
|
||||||
|
|
||||||
## Test failures
|
|
||||||
|
|
||||||
If there are any test failures in the Kokoro build, releases are blocked until
|
|
||||||
the failures have been resolved.
|
|
||||||
|
|
||||||
## How to release
|
|
||||||
|
|
||||||
### Automated Releases (`cloud.google.com/go` and submodules)
|
|
||||||
|
|
||||||
We now use [release-please](https://github.com/googleapis/release-please) to
|
|
||||||
perform automated releases for `cloud.google.com/go` and all submodules.
|
|
||||||
|
|
||||||
1. If there are changes that have not yet been released, a
|
|
||||||
[pull request](https://github.com/googleapis/google-cloud-go/pull/2971) will
|
|
||||||
be automatically opened by release-please
|
|
||||||
with a title like "chore: release X.Y.Z" (for the root module) or
|
|
||||||
"chore: release datastore X.Y.Z" (for the datastore submodule), where X.Y.Z
|
|
||||||
is the next version to be released. Find the desired pull request
|
|
||||||
[here](https://github.com/googleapis/google-cloud-go/pulls)
|
|
||||||
1. Check for failures in the
|
|
||||||
[continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
|
|
||||||
any failures in the most recent build, address them before proceeding with
|
|
||||||
the release. (This applies even if the failures are in a different submodule
|
|
||||||
from the one being released.)
|
|
||||||
1. Review the release notes. These are automatically generated from the titles
|
|
||||||
of any merged commits since the previous release. If you would like to edit
|
|
||||||
them, this can be done by updating the changes in the release PR.
|
|
||||||
1. To cut a release, approve and merge the pull request. Doing so will
|
|
||||||
update the `CHANGES.md`, tag the merged commit with the appropriate version,
|
|
||||||
and draft a GitHub release which will copy the notes from `CHANGES.md`.
|
|
||||||
|
|
||||||
### Manual Release (`cloud.google.com/go`)
|
|
||||||
|
|
||||||
If for whatever reason the automated release process is not working as expected,
|
|
||||||
here is how to manually cut a release of `cloud.google.com/go`.
|
|
||||||
|
|
||||||
1. Check for failures in the
|
|
||||||
[continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
|
|
||||||
any failures in the most recent build, address them before proceeding with
|
|
||||||
the release.
|
|
||||||
1. Navigate to `google-cloud-go/` and switch to main.
|
|
||||||
1. `git pull`
|
|
||||||
1. Run `git tag -l | grep -v beta | grep -v alpha` to see all existing releases.
|
|
||||||
The current latest tag `$CV` is the largest tag. It should look something
|
|
||||||
like `vX.Y.Z` (note: ignore all `LIB/vX.Y.Z` tags - these are tags for a
|
|
||||||
specific library, not the module root). We'll call the current version `$CV`
|
|
||||||
and the new version `$NV`.
|
|
||||||
1. On main, run `git log $CV...` to list all the changes since the last
|
|
||||||
release. NOTE: You must manually visually parse out changes to submodules [1]
|
|
||||||
(the `git log` is going to show you things in submodules, which are not going
|
|
||||||
to be part of your release).
|
|
||||||
1. Edit `CHANGES.md` to include a summary of the changes.
|
|
||||||
1. In `internal/version/version.go`, update `const Repo` to today's date with
|
|
||||||
the format `YYYYMMDD`.
|
|
||||||
1. In `internal/version` run `go generate`.
|
|
||||||
1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork,
|
|
||||||
and create a PR titled `chore: release $NV`.
|
|
||||||
1. Wait for the PR to be reviewed and merged. Once it's merged, and without
|
|
||||||
merging any other PRs in the meantime:
|
|
||||||
a. Switch to main.
|
|
||||||
b. `git pull`
|
|
||||||
c. Tag the repo with the next version: `git tag $NV`.
|
|
||||||
d. Push the tag to origin:
|
|
||||||
`git push origin $NV`
|
|
||||||
1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
|
|
||||||
with the new release, copying the contents of `CHANGES.md`.
|
|
||||||
|
|
||||||
### Manual Releases (submodules)
|
|
||||||
|
|
||||||
If for whatever reason the automated release process is not working as expected,
|
|
||||||
here is how to manually cut a release of a submodule.
|
|
||||||
|
|
||||||
(these instructions assume we're releasing `cloud.google.com/go/datastore` - adjust accordingly)
|
|
||||||
|
|
||||||
1. Check for failures in the
|
|
||||||
[continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
|
|
||||||
any failures in the most recent build, address them before proceeding with
|
|
||||||
the release. (This applies even if the failures are in a different submodule
|
|
||||||
from the one being released.)
|
|
||||||
1. Navigate to `google-cloud-go/` and switch to main.
|
|
||||||
1. `git pull`
|
|
||||||
1. Run `git tag -l | grep datastore | grep -v beta | grep -v alpha` to see all
|
|
||||||
existing releases. The current latest tag `$CV` is the largest tag. It
|
|
||||||
should look something like `datastore/vX.Y.Z`. We'll call the current version
|
|
||||||
`$CV` and the new version `$NV`.
|
|
||||||
1. On main, run `git log $CV.. -- datastore/` to list all the changes to the
|
|
||||||
submodule directory since the last release.
|
|
||||||
1. Edit `datastore/CHANGES.md` to include a summary of the changes.
|
|
||||||
1. In `internal/version` run `go generate`.
|
|
||||||
1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork,
|
|
||||||
and create a PR titled `chore(datastore): release $NV`.
|
|
||||||
1. Wait for the PR to be reviewed and merged. Once it's merged, and without
|
|
||||||
merging any other PRs in the meantime:
|
|
||||||
a. Switch to main.
|
|
||||||
b. `git pull`
|
|
||||||
c. Tag the repo with the next version: `git tag $NV`.
|
|
||||||
d. Push the tag to origin:
|
|
||||||
`git push origin $NV`
|
|
||||||
1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
|
|
||||||
with the new release, copying the contents of `datastore/CHANGES.md`.
|
|
7 vendor/cloud.google.com/go/SECURITY.md (generated, vendored)
@@ -1,7 +0,0 @@
|
||||||
# Security Policy
|
|
||||||
|
|
||||||
To report a security issue, please use [g.co/vulnz](https://g.co/vulnz).
|
|
||||||
|
|
||||||
The Google Security Team will respond within 5 working days of your report on g.co/vulnz.
|
|
||||||
|
|
||||||
We use g.co/vulnz for our intake, and do coordination and disclosure here using GitHub Security Advisory to privately discuss and fix the issue.
|
|
228 vendor/cloud.google.com/go/doc.go (generated, vendored)
@@ -1,228 +0,0 @@
|
||||||
// Copyright 2014 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package cloud is the root of the packages used to access Google Cloud
|
|
||||||
Services. See https://godoc.org/cloud.google.com/go for a full list
|
|
||||||
of sub-packages.
|
|
||||||
|
|
||||||
# Client Options
|
|
||||||
|
|
||||||
All clients in sub-packages are configurable via client options. These options are
|
|
||||||
described here: https://godoc.org/google.golang.org/api/option.
|
|
||||||
|
|
||||||
# Authentication and Authorization
|
|
||||||
|
|
||||||
All the clients in sub-packages support authentication via Google Application Default
|
|
||||||
Credentials (see https://cloud.google.com/docs/authentication/production), or
|
|
||||||
by providing a JSON key file for a Service Account. See examples below.
|
|
||||||
|
|
||||||
Google Application Default Credentials (ADC) is the recommended way to authorize
|
|
||||||
and authenticate clients. For information on how to create and obtain
|
|
||||||
Application Default Credentials, see
|
|
||||||
https://cloud.google.com/docs/authentication/production. Here is an example
|
|
||||||
of a client using ADC to authenticate:
|
|
||||||
|
|
||||||
client, err := secretmanager.NewClient(context.Background())
|
|
||||||
if err != nil {
|
|
||||||
// TODO: handle error.
|
|
||||||
}
|
|
||||||
_ = client // Use the client.
|
|
||||||
|
|
||||||
You can use a file with credentials to authenticate and authorize, such as a JSON
|
|
||||||
key file associated with a Google service account. Service Account keys can be
|
|
||||||
created and downloaded from
|
|
||||||
https://console.cloud.google.com/iam-admin/serviceaccounts. This example uses
|
|
||||||
the Secret Manager client, but the same steps apply to the other client libraries
|
|
||||||
underneath this package. Example:
|
|
||||||
|
|
||||||
client, err := secretmanager.NewClient(context.Background(),
|
|
||||||
option.WithCredentialsFile("/path/to/service-account-key.json"))
|
|
||||||
if err != nil {
|
|
||||||
// TODO: handle error.
|
|
||||||
}
|
|
||||||
_ = client // Use the client.
|
|
||||||
|
|
||||||
In some cases (for instance, you don't want to store secrets on disk), you can
|
|
||||||
create credentials from in-memory JSON and use the WithCredentials option.
|
|
||||||
The google package in this example is at golang.org/x/oauth2/google.
|
|
||||||
This example uses the Secret Manager client, but the same steps apply to
|
|
||||||
the other client libraries underneath this package. Note that scopes can be
|
|
||||||
found at https://developers.google.com/identity/protocols/oauth2/scopes, and
|
|
||||||
are also provided in all auto-generated libraries: for example,
|
|
||||||
cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example:
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: handle error.
|
|
||||||
}
|
|
||||||
client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds))
|
|
||||||
if err != nil {
|
|
||||||
// TODO: handle error.
|
|
||||||
}
|
|
||||||
_ = client // Use the client.
|
|
||||||
|
|
||||||
# Timeouts and Cancellation
|
|
||||||
|
|
||||||
By default, non-streaming methods, like Create or Get, will have a default deadline applied to the
|
|
||||||
context provided at call time, unless a context deadline is already set. Streaming
|
|
||||||
methods have no default deadline and will run indefinitely. To set timeouts or
|
|
||||||
arrange for cancellation, use contexts. Transient
|
|
||||||
errors will be retried when correctness allows.
|
|
||||||
|
|
||||||
Here is an example of how to set a timeout for an RPC using context.WithTimeout:
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
// Do not set a timeout on the context passed to NewClient: dialing happens
|
|
||||||
// asynchronously, and the context is used to refresh credentials in the
|
|
||||||
// background.
|
|
||||||
client, err := secretmanager.NewClient(ctx)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: handle error.
|
|
||||||
}
|
|
||||||
// Time out if it takes more than 10 seconds to delete the secret.
|
|
||||||
tctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
|
||||||
defer cancel() // Always call cancel.
|
|
||||||
|
|
||||||
req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"}
|
|
||||||
if err := client.DeleteSecret(tctx, req); err != nil {
|
|
||||||
// TODO: handle error.
|
|
||||||
}
|
|
||||||
|
|
||||||
Here is an example of how to arrange for an RPC to be canceled using context.WithCancel:
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
// Do not cancel the context passed to NewClient: dialing happens asynchronously,
|
|
||||||
// and the context is used to refresh credentials in the background.
|
|
||||||
client, err := secretmanager.NewClient(ctx)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: handle error.
|
|
||||||
}
|
|
||||||
cctx, cancel := context.WithCancel(ctx)
|
|
||||||
defer cancel() // Always call cancel.
|
|
||||||
|
|
||||||
// TODO: Make the cancel function available to whatever might want to cancel the
|
|
||||||
// call--perhaps a GUI button.
|
|
||||||
req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"}
|
|
||||||
if err := client.DeleteSecret(cctx, req); err != nil {
|
|
||||||
// TODO: handle error.
|
|
||||||
}
|
|
||||||
|
|
||||||
To opt out of default deadlines, set the temporary environment variable
|
|
||||||
GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE to "true" prior to client
|
|
||||||
creation. This affects all Google Cloud Go client libraries. This opt-out
|
|
||||||
mechanism will be removed in a future release. File an issue at
|
|
||||||
https://github.com/googleapis/google-cloud-go if the default deadlines
|
|
||||||
cannot work for you.
|
|
||||||
|
|
||||||
Do not attempt to control the initial connection (dialing) of a service by setting a
|
|
||||||
timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts
|
|
||||||
would be ineffective and would only interfere with credential refreshing, which uses
|
|
||||||
the same context.
|
|
||||||
|
|
||||||
# Connection Pooling
|
|
||||||
|
|
||||||
Connection pooling differs in clients based on their transport. Cloud
|
|
||||||
clients either rely on HTTP or gRPC transports to communicate
|
|
||||||
with Google Cloud.
|
|
||||||
|
|
||||||
Cloud clients that use HTTP (bigquery, compute, storage, and translate) rely on the
|
|
||||||
underlying HTTP transport to cache connections for later re-use. These are cached to
|
|
||||||
the default http.MaxIdleConns and http.MaxIdleConnsPerHost settings in
|
|
||||||
http.DefaultTransport.
|
|
||||||
|
|
||||||
For gRPC clients (all others in this repo), connection pooling is configurable. Users
|
|
||||||
of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a client
|
|
||||||
option to NewClient calls. This configures the underlying gRPC connections to be
|
|
||||||
pooled and addressed in a round robin fashion.
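For example, a minimal sketch (the secretmanager client and the pool size of 4
are illustrative choices, not requirements):

ctx := context.Background()
client, err := secretmanager.NewClient(ctx, option.WithGRPCConnectionPool(4))
if err != nil {
	// TODO: handle error.
}
_ = client // Use the client.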
|
|
||||||
|
|
||||||
# Using the Libraries with Docker
|
|
||||||
|
|
||||||
Minimal docker images like Alpine lack CA certificates. This causes RPCs to appear to
|
|
||||||
hang, because gRPC retries indefinitely. See https://github.com/googleapis/google-cloud-go/issues/928
|
|
||||||
for more information.
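For Alpine-based images, one common fix (a sketch; it assumes the apk package
manager is available) is to install the CA bundle in the image, for example in
a Dockerfile RUN step:

apk add --no-cache ca-certificates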
|
|
||||||
|
|
||||||
# Debugging
|
|
||||||
|
|
||||||
To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See
|
|
||||||
https://godoc.org/google.golang.org/grpc/grpclog for more information.
|
|
||||||
|
|
||||||
For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2".
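For example (a sketch; any go command that runs your code works here):

GRPC_GO_LOG_SEVERITY_LEVEL=info go test ./...
GODEBUG=http2debug=1 go test ./...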
|
|
||||||
|
|
||||||
# Inspecting errors
|
|
||||||
|
|
||||||
Most of the errors returned by the generated clients are wrapped in an
|
|
||||||
`apierror.APIError` (https://pkg.go.dev/github.com/googleapis/gax-go/v2/apierror)
|
|
||||||
and can be further unwrapped into a `grpc.Status` or `googleapi.Error` depending
|
|
||||||
on the transport used to make the call (gRPC or REST). Converting your errors to
|
|
||||||
these types can be a useful way to get more information about what went wrong
|
|
||||||
while debugging.
|
|
||||||
|
|
||||||
`apierror.APIError` gives access to specific details in the
|
|
||||||
error. The transport-specific errors can still be unwrapped using the
|
|
||||||
`apierror.APIError`.
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
var ae *apierror.APIError
|
|
||||||
if errors.As(err, &ae) {
|
|
||||||
log.Println(ae.Reason())
|
|
||||||
log.Println(ae.Details().Help.GetLinks())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
If the gRPC transport was used, the `grpc.Status` can still be parsed using the
|
|
||||||
`status.FromError` function.
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
if s, ok := status.FromError(err); ok {
|
|
||||||
log.Println(s.Message())
|
|
||||||
for _, d := range s.Proto().Details {
|
|
||||||
log.Println(d)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
If the REST transport was used, the `googleapi.Error` can be parsed in a similar
|
|
||||||
way.
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
var gerr *googleapi.Error
|
|
||||||
if errors.As(err, &gerr) {
|
|
||||||
log.Println(gerr.Message)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# Client Stability
|
|
||||||
|
|
||||||
Clients in this repository are considered alpha or beta unless otherwise
|
|
||||||
marked as stable in the README.md. Semver is not used to communicate stability
|
|
||||||
of clients.
|
|
||||||
|
|
||||||
Alpha and beta clients may change or go away without notice.
|
|
||||||
|
|
||||||
Clients marked stable will maintain compatibility with future versions for as
|
|
||||||
long as we can reasonably sustain. Incompatible changes might be made in some
|
|
||||||
situations, including:
|
|
||||||
|
|
||||||
- Security bugs may prompt backwards-incompatible changes.
|
|
||||||
|
|
||||||
- Situations in which components are no longer feasible to maintain without
|
|
||||||
making breaking changes, including removal.
|
|
||||||
|
|
||||||
- Parts of the client surface may be outright unstable and subject to change.
|
|
||||||
These parts of the surface will be labeled with the note, "It is EXPERIMENTAL
|
|
||||||
and subject to change or removal without notice."
|
|
||||||
*/
|
|
||||||
package cloud // import "cloud.google.com/go"
|
|
7 vendor/cloud.google.com/go/iam/CHANGES.md (generated, vendored)
@@ -1,5 +1,12 @@
|
||||||
# Changes
|
||||||
|
|
||||||
|
## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.3.0...iam/v0.4.0) (2022-09-06)
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* **iam:** start generating apiv2 ([#6605](https://github.com/googleapis/google-cloud-go/issues/6605)) ([a6004e7](https://github.com/googleapis/google-cloud-go/commit/a6004e762f782869cd85688937475744f7b17e50))
|
||||||
|
|
||||||
## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.2.0...iam/v0.3.0) (2022-02-23)
|
||||||
|
|
||||||
|
|
||||||
|
|
23 vendor/cloud.google.com/go/iam/go_mod_tidy_hack.go (generated, vendored)
@@ -1,23 +0,0 @@
|
||||||
// Copyright 2022 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// This file, and the cloud.google.com/go import, won't actually become part of
|
|
||||||
// the resultant binary.
|
|
||||||
//go:build modhack
|
|
||||||
// +build modhack
|
|
||||||
|
|
||||||
package iam
|
|
||||||
|
|
||||||
// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
|
|
||||||
import _ "cloud.google.com/go"
|
|
334 vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json (generated, vendored)
@@ -1,334 +0,0 @@
|
||||||
{
|
|
||||||
"release-type": "go-yoshi",
|
|
||||||
"include-component-in-tag": true,
|
|
||||||
"tag-separator": "/",
|
|
||||||
"packages": {
|
|
||||||
"accessapproval": {
|
|
||||||
"component": "accessapproval"
|
|
||||||
},
|
|
||||||
"accesscontextmanager": {
|
|
||||||
"component": "accesscontextmanager"
|
|
||||||
},
|
|
||||||
"aiplatform": {
|
|
||||||
"component": "aiplatform"
|
|
||||||
},
|
|
||||||
"analytics": {
|
|
||||||
"component": "analytics"
|
|
||||||
},
|
|
||||||
"apigateway": {
|
|
||||||
"component": "apigateway"
|
|
||||||
},
|
|
||||||
"apigeeconnect": {
|
|
||||||
"component": "apigeeconnect"
|
|
||||||
},
|
|
||||||
"apigeeregistry": {
|
|
||||||
"component": "apigeeregistry"
|
|
||||||
},
|
|
||||||
"apikeys": {
|
|
||||||
"component": "apikeys"
|
|
||||||
},
|
|
||||||
"appengine": {
|
|
||||||
"component": "appengine"
|
|
||||||
},
|
|
||||||
"area120": {
|
|
||||||
"component": "area120"
|
|
||||||
},
|
|
||||||
"artifactregistry": {
|
|
||||||
"component": "artifactregistry"
|
|
||||||
},
|
|
||||||
"asset": {
|
|
||||||
"component": "asset"
|
|
||||||
},
|
|
||||||
"assuredworkloads": {
|
|
||||||
"component": "assuredworkloads"
|
|
||||||
},
|
|
||||||
"automl": {
|
|
||||||
"component": "automl"
|
|
||||||
},
|
|
||||||
"baremetalsolution": {
|
|
||||||
"component": "baremetalsolution"
|
|
||||||
},
|
|
||||||
"batch": {
|
|
||||||
"component": "batch"
|
|
||||||
},
|
|
||||||
"beyondcorp": {
|
|
||||||
"component": "beyondcorp"
|
|
||||||
},
|
|
||||||
"billing": {
|
|
||||||
"component": "billing"
|
|
||||||
},
|
|
||||||
"binaryauthorization": {
|
|
||||||
"component": "binaryauthorization"
|
|
||||||
},
|
|
||||||
"certificatemanager": {
|
|
||||||
"component": "certificatemanager"
|
|
||||||
},
|
|
||||||
"channel": {
|
|
||||||
"component": "channel"
|
|
||||||
},
|
|
||||||
"cloudbuild": {
|
|
||||||
"component": "cloudbuild"
|
|
||||||
},
|
|
||||||
"clouddms": {
|
|
||||||
"component": "clouddms"
|
|
||||||
},
|
|
||||||
"cloudtasks": {
|
|
||||||
"component": "cloudtasks"
|
|
||||||
},
|
|
||||||
"compute": {
|
|
||||||
"component": "compute"
|
|
||||||
},
|
|
||||||
"contactcenterinsights": {
|
|
||||||
"component": "contactcenterinsights"
|
|
||||||
},
|
|
||||||
"container": {
|
|
||||||
"component": "container"
|
|
||||||
},
|
|
||||||
"containeranalysis": {
|
|
||||||
"component": "containeranalysis"
|
|
||||||
},
|
|
||||||
"datacatalog": {
|
|
||||||
"component": "datacatalog"
|
|
||||||
},
|
|
||||||
"dataflow": {
|
|
||||||
"component": "dataflow"
|
|
||||||
},
|
|
||||||
"dataform": {
|
|
||||||
"component": "dataform"
|
|
||||||
},
|
|
||||||
"datafusion": {
|
|
||||||
"component": "datafusion"
|
|
||||||
},
|
|
||||||
"datalabeling": {
|
|
||||||
"component": "datalabeling"
|
|
||||||
},
|
|
||||||
"dataplex": {
|
|
||||||
"component": "dataplex"
|
|
||||||
},
|
|
||||||
"dataproc": {
|
|
||||||
"component": "dataproc"
|
|
||||||
},
|
|
||||||
"dataqna": {
|
|
||||||
"component": "dataqna"
|
|
||||||
},
|
|
||||||
"datastream": {
|
|
||||||
"component": "datastream"
|
|
||||||
},
|
|
||||||
"deploy": {
|
|
||||||
"component": "deploy"
|
|
||||||
},
|
|
||||||
"dialogflow": {
|
|
||||||
"component": "dialogflow"
|
|
||||||
},
|
|
||||||
"dlp": {
|
|
||||||
"component": "dlp"
|
|
||||||
},
|
|
||||||
"documentai": {
|
|
||||||
"component": "documentai"
|
|
||||||
},
|
|
||||||
"domains": {
|
|
||||||
"component": "domains"
|
|
||||||
},
|
|
||||||
"essentialcontacts": {
|
|
||||||
"component": "essentialcontacts"
|
|
||||||
},
|
|
||||||
"eventarc": {
|
|
||||||
"component": "eventarc"
|
|
||||||
},
|
|
||||||
"filestore": {
|
|
||||||
"component": "filestore"
|
|
||||||
},
|
|
||||||
"functions": {
|
|
||||||
"component": "functions"
|
|
||||||
},
|
|
||||||
"gaming": {
|
|
||||||
"component": "gaming"
|
|
||||||
},
|
|
||||||
"gkebackup": {
|
|
||||||
"component": "gkebackup"
|
|
||||||
},
|
|
||||||
"gkeconnect": {
|
|
||||||
"component": "gkeconnect"
|
|
||||||
},
|
|
||||||
"gkehub": {
|
|
||||||
"component": "gkehub"
|
|
||||||
},
|
|
||||||
"gkemulticloud": {
|
|
||||||
"component": "gkemulticloud"
|
|
||||||
},
|
|
||||||
"grafeas": {
|
|
||||||
"component": "grafeas"
|
|
||||||
},
|
|
||||||
"gsuiteaddons": {
|
|
||||||
"component": "gsuiteaddons"
|
|
||||||
},
|
|
||||||
"iam": {
|
|
||||||
"component": "iam"
|
|
||||||
},
|
|
||||||
"iap": {
|
|
||||||
"component": "iap"
|
|
||||||
},
|
|
||||||
"ids": {
|
|
||||||
"component": "ids"
|
|
||||||
},
|
|
||||||
"iot": {
|
|
||||||
"component": "iot"
|
|
||||||
},
|
|
||||||
"kms": {
|
|
||||||
"component": "kms"
|
|
||||||
},
|
|
||||||
"language": {
|
|
||||||
"component": "language"
|
|
||||||
},
|
|
||||||
"lifesciences": {
|
|
||||||
"component": "lifesciences"
|
|
||||||
},
|
|
||||||
"managedidentities": {
|
|
||||||
"component": "managedidentities"
|
|
||||||
},
|
|
||||||
"mediatranslation": {
|
|
||||||
"component": "mediatranslation"
|
|
||||||
},
|
|
||||||
"memcache": {
|
|
||||||
"component": "memcache"
|
|
||||||
},
|
|
||||||
"metastore": {
|
|
||||||
"component": "metastore"
|
|
||||||
},
|
|
||||||
"monitoring": {
|
|
||||||
"component": "monitoring"
|
|
||||||
},
|
|
||||||
"networkconnectivity": {
|
|
||||||
"component": "networkconnectivity"
|
|
||||||
},
|
|
||||||
"networkmanagement": {
|
|
||||||
"component": "networkmanagement"
|
|
||||||
},
|
|
||||||
"networksecurity": {
|
|
||||||
"component": "networksecurity"
|
|
||||||
},
|
|
||||||
"notebooks": {
|
|
||||||
"component": "notebooks"
|
|
||||||
},
|
|
||||||
"optimization": {
|
|
||||||
"component": "optimization"
|
|
||||||
},
|
|
||||||
"orchestration": {
|
|
||||||
"component": "orchestration"
|
|
||||||
},
|
|
||||||
"orgpolicy": {
|
|
||||||
"component": "orgpolicy"
|
|
||||||
},
|
|
||||||
"osconfig": {
|
|
||||||
"component": "osconfig"
|
|
||||||
},
|
|
||||||
"oslogin": {
|
|
||||||
"component": "oslogin"
|
|
||||||
},
|
|
||||||
"phishingprotection": {
|
|
||||||
"component": "phishingprotection"
|
|
||||||
},
|
|
||||||
"policytroubleshooter": {
|
|
||||||
"component": "policytroubleshooter"
|
|
||||||
},
|
|
||||||
"privatecatalog": {
|
|
||||||
"component": "privatecatalog"
|
|
||||||
},
|
|
||||||
"recaptchaenterprise/v2": {
|
|
||||||
"component": "recaptchaenterprise"
|
|
||||||
},
|
|
||||||
"recommendationengine": {
|
|
||||||
"component": "recommendationengine"
|
|
||||||
},
|
|
||||||
"recommender": {
|
|
||||||
"component": "recommender"
|
|
||||||
},
|
|
||||||
"redis": {
|
|
||||||
"component": "redis"
|
|
||||||
},
|
|
||||||
"resourcemanager": {
|
|
||||||
"component": "resourcemanager"
|
|
||||||
},
|
|
||||||
"resourcesettings": {
|
|
||||||
"component": "resourcesettings"
|
|
||||||
},
|
|
||||||
"retail": {
|
|
||||||
"component": "retail"
|
|
||||||
},
|
|
||||||
"run": {
|
|
||||||
"component": "run"
|
|
||||||
},
|
|
||||||
"scheduler": {
|
|
||||||
"component": "scheduler"
|
|
||||||
},
|
|
||||||
"secretmanager": {
|
|
||||||
"component": "secretmanager"
|
|
||||||
},
|
|
||||||
"security": {
|
|
||||||
"component": "security"
|
|
||||||
},
|
|
||||||
"securitycenter": {
|
|
||||||
"component": "securitycenter"
|
|
||||||
},
|
|
||||||
"servicecontrol": {
|
|
||||||
"component": "servicecontrol"
|
|
||||||
},
|
|
||||||
"servicedirectory": {
|
|
||||||
"component": "servicedirectory"
|
|
||||||
},
|
|
||||||
"servicemanagement": {
|
|
||||||
"component": "servicemanagement"
|
|
||||||
},
|
|
||||||
"serviceusage": {
|
|
||||||
"component": "serviceusage"
|
|
||||||
},
|
|
||||||
"shell": {
|
|
||||||
"component": "shell"
|
|
||||||
},
|
|
||||||
"speech": {
|
|
||||||
"component": "speech"
|
|
||||||
},
|
|
||||||
"storagetransfer": {
|
|
||||||
"component": "storagetransfer"
|
|
||||||
},
|
|
||||||
"talent": {
|
|
||||||
"component": "talent"
|
|
||||||
},
|
|
||||||
"texttospeech": {
|
|
||||||
"component": "texttospeech"
|
|
||||||
},
|
|
||||||
"tpu": {
|
|
||||||
"component": "tpu"
|
|
||||||
},
|
|
||||||
"trace": {
|
|
||||||
"component": "trace"
|
|
||||||
},
|
|
||||||
"translate": {
|
|
||||||
"component": "translate"
|
|
||||||
},
|
|
||||||
"video": {
|
|
||||||
"component": "video"
|
|
||||||
},
|
|
||||||
"videointelligence": {
|
|
||||||
"component": "videointelligence"
|
|
||||||
},
|
|
||||||
"vision/v2": {
|
|
||||||
"component": "vision"
|
|
||||||
},
|
|
||||||
"vmmigration": {
|
|
||||||
"component": "vmmigration"
|
|
||||||
},
|
|
||||||
"vpcaccess": {
|
|
||||||
"component": "vpcaccess"
|
|
||||||
},
|
|
||||||
"webrisk": {
|
|
||||||
"component": "webrisk"
|
|
||||||
},
|
|
||||||
"websecurityscanner": {
|
|
||||||
"component": "websecurityscanner"
|
|
||||||
},
|
|
||||||
"workflows": {
|
|
||||||
"component": "workflows"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
10 vendor/cloud.google.com/go/release-please-config.json (generated, vendored)
@@ -1,10 +0,0 @@
|
||||||
{
|
|
||||||
"release-type": "go-yoshi",
|
|
||||||
"separate-pull-requests": true,
|
|
||||||
"include-component-in-tag": false,
|
|
||||||
"packages": {
|
|
||||||
".": {
|
|
||||||
"component": "main"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
236 vendor/cloud.google.com/go/testing.md (generated, vendored)
@@ -1,236 +0,0 @@
|
||||||
# Testing Code that depends on Go Client Libraries
|
|
||||||
|
|
||||||
The Go client libraries generated as a part of `cloud.google.com/go` all take
|
|
||||||
the approach of returning concrete types instead of interfaces. That way, new
|
|
||||||
fields and methods can be added to the libraries without breaking users. This
|
|
||||||
document will go over some patterns that can be used to test code that depends
|
|
||||||
on the Go client libraries.
|
|
||||||
|
|
||||||
## Testing gRPC services using fakes
|
|
||||||
|
|
||||||
*Note*: You can see the full
|
|
||||||
[example code using a fake here](https://github.com/googleapis/google-cloud-go/tree/main/internal/examples/fake).
|
|
||||||
|
|
||||||
The clients found in `cloud.google.com/go` are gRPC based, with a couple of
|
|
||||||
notable exceptions being the [`storage`](https://pkg.go.dev/cloud.google.com/go/storage)
|
|
||||||
and [`bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery) clients.
|
|
||||||
Interactions with gRPC services can be faked by serving up your own in-memory
|
|
||||||
server within your test. One benefit of using this approach is that you don’t
|
|
||||||
need to define an interface in your runtime code; you can keep using
|
|
||||||
concrete struct types. You instead define a fake server in your test code. For
|
|
||||||
example, take a look at the following function:
|
|
||||||
|
|
||||||
```go
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
translate "cloud.google.com/go/translate/apiv3"
|
|
||||||
"github.com/googleapis/gax-go/v2"
|
|
||||||
translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TranslateTextWithConcreteClient(client *translate.TranslationClient, text string, targetLang string) (string, error) {
|
|
||||||
ctx := context.Background()
|
|
||||||
log.Printf("Translating %q to %q", text, targetLang)
|
|
||||||
req := &translatepb.TranslateTextRequest{
|
|
||||||
Parent: fmt.Sprintf("projects/%s/locations/global", os.Getenv("GOOGLE_CLOUD_PROJECT")),
|
|
||||||
TargetLanguageCode: targetLang,
|
|
||||||
Contents: []string{text},
|
|
||||||
}
|
|
||||||
resp, err := client.TranslateText(ctx, req)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("unable to translate text: %v", err)
|
|
||||||
}
|
|
||||||
translations := resp.GetTranslations()
|
|
||||||
if len(translations) != 1 {
|
|
||||||
return "", fmt.Errorf("expected only one result, got %d", len(translations))
|
|
||||||
}
|
|
||||||
return translations[0].TranslatedText, nil
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Here is an example of what a fake server implementation would look like for
|
|
||||||
faking the interactions above:
|
|
||||||
|
|
||||||
```go
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
|
|
||||||
)
|
|
||||||
|
|
||||||
type fakeTranslationServer struct {
|
|
||||||
translatepb.UnimplementedTranslationServiceServer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *fakeTranslationServer) TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest) (*translatepb.TranslateTextResponse, error) {
|
|
||||||
resp := &translatepb.TranslateTextResponse{
|
|
||||||
Translations: []*translatepb.Translation{
|
|
||||||
&translatepb.Translation{
|
|
||||||
TranslatedText: "Hello World",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
return resp, nil
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
All of the generated protobuf code found in [google.golang.org/genproto](https://pkg.go.dev/google.golang.org/genproto)
|
|
||||||
contains a similar `package.UnimplementedFooServer` type that is useful for
|
|
||||||
creating fakes. By embedding the unimplemented server in the
|
|
||||||
`fakeTranslationServer`, the fake will “inherit” all of the RPCs the server
|
|
||||||
exposes. Then, by providing our own `fakeTranslationServer.TranslateText`
|
|
||||||
method you can “override” the default unimplemented behavior of the one RPC that
|
|
||||||
you would like to be faked.
|
|
||||||
|
|
||||||
The test itself does require a little bit of setup: start up a `net.Listener`,
|
|
||||||
register the server, and tell the client library to call the server:
|
|
||||||
|
|
||||||
```go
import (
	"context"
	"net"
	"testing"

	translate "cloud.google.com/go/translate/apiv3"
	"google.golang.org/api/option"
	translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
	"google.golang.org/grpc"
)

func TestTranslateTextWithConcreteClient(t *testing.T) {
	ctx := context.Background()

	// Setup the fake server.
	fakeTranslationServer := &fakeTranslationServer{}
	l, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatal(err)
	}
	gsrv := grpc.NewServer()
	translatepb.RegisterTranslationServiceServer(gsrv, fakeTranslationServer)
	fakeServerAddr := l.Addr().String()
	go func() {
		if err := gsrv.Serve(l); err != nil {
			panic(err)
		}
	}()

	// Create a client.
	client, err := translate.NewTranslationClient(ctx,
		option.WithEndpoint(fakeServerAddr),
		option.WithoutAuthentication(),
		option.WithGRPCDialOption(grpc.WithInsecure()),
	)
	if err != nil {
		t.Fatal(err)
	}

	// Run the test.
	text, err := TranslateTextWithConcreteClient(client, "Hola Mundo", "en-US")
	if err != nil {
		t.Fatal(err)
	}
	if text != "Hello World" {
		t.Fatalf("got %q, want Hello World", text)
	}
}
```

## Testing using mocks

*Note*: You can see the full
[example code using a mock here](https://github.com/googleapis/google-cloud-go/tree/main/internal/examples/mock).

When mocking code you need to work with interfaces. Let’s create an interface
for the `cloud.google.com/go/translate/apiv3` client used in the
`TranslateTextWithConcreteClient` function mentioned in the previous section.
The `translate.Client` has over a dozen methods but this code only uses one of
them. Here is an interface that satisfies the interactions of the
`translate.Client` in this function.

```go
type TranslationClient interface {
	TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error)
}
```

Now that we have an interface that satisfies the method being used we can
rewrite the function signature to take the interface instead of the concrete
type.

```go
func TranslateTextWithInterfaceClient(client TranslationClient, text string, targetLang string) (string, error) {
	// ...
}
```

This allows a real `translate.Client` to be passed to the method in production
and for a mock implementation to be passed in during testing. This pattern can
be applied to any Go code, not just `cloud.google.com/go`. This is because
interfaces in Go are implicitly satisfied. Structs in the client libraries can
implicitly implement interfaces defined in your codebase. Let’s take a look at
what it might look like to define a lightweight mock for the `TranslationClient`
interface.

```go
import (
	"context"
	"testing"

	"github.com/googleapis/gax-go/v2"
	translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
)

type mockClient struct{}

func (*mockClient) TranslateText(_ context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error) {
	resp := &translatepb.TranslateTextResponse{
		Translations: []*translatepb.Translation{
			&translatepb.Translation{
				TranslatedText: "Hello World",
			},
		},
	}
	return resp, nil
}

func TestTranslateTextWithAbstractClient(t *testing.T) {
	client := &mockClient{}
	text, err := TranslateTextWithInterfaceClient(client, "Hola Mundo", "en-US")
	if err != nil {
		t.Fatal(err)
	}
	if text != "Hello World" {
		t.Fatalf("got %q, want Hello World", text)
	}
}
```

If you prefer not to write your own mocks, there are mocking frameworks such as
[golang/mock](https://github.com/golang/mock) that can generate mocks for you
from an interface. As a word of caution, though, try not to
[overuse mocks](https://testing.googleblog.com/2013/05/testing-on-toilet-dont-overuse-mocks.html).
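
If you do go the golang/mock route, a mock for the `TranslationClient`
interface above can be generated rather than written by hand. The following is
only a sketch: the file names, output path, and package names are hypothetical
placeholders, not part of the example project.

```go
// translation_client_gen.go — assumes the TranslationClient interface is
// declared in translation_client.go in this package and that mockgen is
// installed and on PATH.
package yourpkg

//go:generate mockgen -source=translation_client.go -destination=mocks/translation_client.go -package=mocks
```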

## Testing using emulators

Some of the client libraries provided in `cloud.google.com/go` support running
against a service emulator. The concept is similar to that of using fakes,
mentioned above, but the server is managed for you. You just need to start it
up and instruct the client library to talk to the emulator by setting a
service-specific environment variable. The currently supported services and
environment variables are:

- bigtable: `BIGTABLE_EMULATOR_HOST`
- datastore: `DATASTORE_EMULATOR_HOST`
- firestore: `FIRESTORE_EMULATOR_HOST`
- pubsub: `PUBSUB_EMULATOR_HOST`
- spanner: `SPANNER_EMULATOR_HOST`
- storage: `STORAGE_EMULATOR_HOST`
  - Although the storage client supports an emulator environment variable, there is no official emulator provided by gcloud.

For more information on emulators, please refer to the
[gcloud documentation](https://cloud.google.com/sdk/gcloud/reference/beta/emulators).
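
For example, here is a minimal sketch of a test that publishes a message
through the Pub/Sub emulator. It assumes the emulator is already running
locally (for example via `gcloud beta emulators pubsub start`) on its default
address `localhost:8085`, and that Go 1.17+ is available for `t.Setenv`; the
project and topic IDs are arbitrary placeholders.

```go
import (
	"context"
	"testing"

	"cloud.google.com/go/pubsub"
)

func TestPublishWithEmulator(t *testing.T) {
	// Point the client library at the locally running emulator.
	t.Setenv("PUBSUB_EMULATOR_HOST", "localhost:8085")

	ctx := context.Background()
	// The project ID does not need to exist when talking to the emulator.
	client, err := pubsub.NewClient(ctx, "test-project")
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	topic, err := client.CreateTopic(ctx, "example-topic")
	if err != nil {
		t.Fatal(err)
	}
	res := topic.Publish(ctx, &pubsub.Message{Data: []byte("hello")})
	if _, err := res.Get(ctx); err != nil {
		t.Fatal(err)
	}
}
```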
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
|
@ -5,4 +5,4 @@ package aws
|
||||||
const SDKName = "aws-sdk-go"
|
const SDKName = "aws-sdk-go"
|
||||||
|
|
||||||
// SDKVersion is the version of this SDK
|
// SDKVersion is the version of this SDK
|
||||||
const SDKVersion = "1.44.91"
|
const SDKVersion = "1.44.93"
|
||||||
|
|
64
vendor/github.com/google/go-cmp/cmp/compare.go
generated
vendored
64
vendor/github.com/google/go-cmp/cmp/compare.go
generated
vendored
|
@ -13,21 +13,21 @@
|
||||||
//
|
//
|
||||||
// The primary features of cmp are:
|
// The primary features of cmp are:
|
||||||
//
|
//
|
||||||
// • When the default behavior of equality does not suit the needs of the test,
|
// - When the default behavior of equality does not suit the test's needs,
|
||||||
// custom equality functions can override the equality operation.
|
// custom equality functions can override the equality operation.
|
||||||
// For example, an equality function may report floats as equal so long as they
|
// For example, an equality function may report floats as equal so long as
|
||||||
// are within some tolerance of each other.
|
// they are within some tolerance of each other.
|
||||||
//
|
//
|
||||||
// • Types that have an Equal method may use that method to determine equality.
|
// - Types with an Equal method may use that method to determine equality.
|
||||||
// This allows package authors to determine the equality operation for the types
|
// This allows package authors to determine the equality operation
|
||||||
// that they define.
|
// for the types that they define.
|
||||||
//
|
//
|
||||||
// • If no custom equality functions are used and no Equal method is defined,
|
// - If no custom equality functions are used and no Equal method is defined,
|
||||||
// equality is determined by recursively comparing the primitive kinds on both
|
// equality is determined by recursively comparing the primitive kinds on
|
||||||
// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported
|
// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual,
|
||||||
// fields are not compared by default; they result in panics unless suppressed
|
// unexported fields are not compared by default; they result in panics
|
||||||
// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly
|
// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported)
|
||||||
// compared using the Exporter option.
|
// or explicitly compared using the Exporter option.
|
||||||
package cmp
|
package cmp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
@ -45,25 +45,25 @@ import (
|
||||||
// Equal reports whether x and y are equal by recursively applying the
|
// Equal reports whether x and y are equal by recursively applying the
|
||||||
// following rules in the given order to x and y and all of their sub-values:
|
// following rules in the given order to x and y and all of their sub-values:
|
||||||
//
|
//
|
||||||
// • Let S be the set of all Ignore, Transformer, and Comparer options that
|
// - Let S be the set of all Ignore, Transformer, and Comparer options that
|
||||||
// remain after applying all path filters, value filters, and type filters.
|
// remain after applying all path filters, value filters, and type filters.
|
||||||
// If at least one Ignore exists in S, then the comparison is ignored.
|
// If at least one Ignore exists in S, then the comparison is ignored.
|
||||||
// If the number of Transformer and Comparer options in S is greater than one,
|
// If the number of Transformer and Comparer options in S is non-zero,
|
||||||
// then Equal panics because it is ambiguous which option to use.
|
// then Equal panics because it is ambiguous which option to use.
|
||||||
// If S contains a single Transformer, then use that to transform the current
|
// If S contains a single Transformer, then use that to transform
|
||||||
// values and recursively call Equal on the output values.
|
// the current values and recursively call Equal on the output values.
|
||||||
// If S contains a single Comparer, then use that to compare the current values.
|
// If S contains a single Comparer, then use that to compare the current values.
|
||||||
// Otherwise, evaluation proceeds to the next rule.
|
// Otherwise, evaluation proceeds to the next rule.
|
||||||
//
|
//
|
||||||
// • If the values have an Equal method of the form "(T) Equal(T) bool" or
|
// - If the values have an Equal method of the form "(T) Equal(T) bool" or
|
||||||
// "(T) Equal(I) bool" where T is assignable to I, then use the result of
|
// "(T) Equal(I) bool" where T is assignable to I, then use the result of
|
||||||
// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
|
// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
|
||||||
// evaluation proceeds to the next rule.
|
// evaluation proceeds to the next rule.
|
||||||
//
|
//
|
||||||
// • Lastly, try to compare x and y based on their basic kinds.
|
// - Lastly, try to compare x and y based on their basic kinds.
|
||||||
// Simple kinds like booleans, integers, floats, complex numbers, strings, and
|
// Simple kinds like booleans, integers, floats, complex numbers, strings,
|
||||||
// channels are compared using the equivalent of the == operator in Go.
|
// and channels are compared using the equivalent of the == operator in Go.
|
||||||
// Functions are only equal if they are both nil, otherwise they are unequal.
|
// Functions are only equal if they are both nil, otherwise they are unequal.
|
||||||
//
|
//
|
||||||
// Structs are equal if recursively calling Equal on all fields report equal.
|
// Structs are equal if recursively calling Equal on all fields report equal.
|
||||||
// If a struct contains unexported fields, Equal panics unless an Ignore option
|
// If a struct contains unexported fields, Equal panics unless an Ignore option
|
||||||
|
@ -144,7 +144,7 @@ func rootStep(x, y interface{}) PathStep {
|
||||||
// so that they have the same parent type.
|
// so that they have the same parent type.
|
||||||
var t reflect.Type
|
var t reflect.Type
|
||||||
if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
|
if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
|
||||||
t = reflect.TypeOf((*interface{})(nil)).Elem()
|
t = anyType
|
||||||
if vx.IsValid() {
|
if vx.IsValid() {
|
||||||
vvx := reflect.New(t).Elem()
|
vvx := reflect.New(t).Elem()
|
||||||
vvx.Set(vx)
|
vvx.Set(vx)
|
||||||
|
@ -639,7 +639,9 @@ type dynChecker struct{ curr, next int }
|
||||||
// Next increments the state and reports whether a check should be performed.
|
// Next increments the state and reports whether a check should be performed.
|
||||||
//
|
//
|
||||||
// Checks occur every Nth function call, where N is a triangular number:
|
// Checks occur every Nth function call, where N is a triangular number:
|
||||||
|
//
|
||||||
// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
|
// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
|
||||||
|
//
|
||||||
// See https://en.wikipedia.org/wiki/Triangular_number
|
// See https://en.wikipedia.org/wiki/Triangular_number
|
||||||
//
|
//
|
||||||
// This sequence ensures that the cost of checks drops significantly as
|
// This sequence ensures that the cost of checks drops significantly as
|
||||||
|
|
44
vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
generated
vendored
44
vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
generated
vendored
|
@ -127,9 +127,9 @@ var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
|
||||||
// This function returns an edit-script, which is a sequence of operations
|
// This function returns an edit-script, which is a sequence of operations
|
||||||
// needed to convert one list into the other. The following invariants for
|
// needed to convert one list into the other. The following invariants for
|
||||||
// the edit-script are maintained:
|
// the edit-script are maintained:
|
||||||
// • eq == (es.Dist()==0)
|
// - eq == (es.Dist()==0)
|
||||||
// • nx == es.LenX()
|
// - nx == es.LenX()
|
||||||
// • ny == es.LenY()
|
// - ny == es.LenY()
|
||||||
//
|
//
|
||||||
// This algorithm is not guaranteed to be an optimal solution (i.e., one that
|
// This algorithm is not guaranteed to be an optimal solution (i.e., one that
|
||||||
// produces an edit-script with a minimal Levenshtein distance). This algorithm
|
// produces an edit-script with a minimal Levenshtein distance). This algorithm
|
||||||
|
@ -169,12 +169,13 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
|
||||||
// A diagonal edge is equivalent to a matching symbol between both X and Y.
|
// A diagonal edge is equivalent to a matching symbol between both X and Y.
|
||||||
|
|
||||||
// Invariants:
|
// Invariants:
|
||||||
// • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
|
// - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
|
||||||
// • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
|
// - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
|
||||||
//
|
//
|
||||||
// In general:
|
// In general:
|
||||||
// • fwdFrontier.X < revFrontier.X
|
// - fwdFrontier.X < revFrontier.X
|
||||||
// • fwdFrontier.Y < revFrontier.Y
|
// - fwdFrontier.Y < revFrontier.Y
|
||||||
|
//
|
||||||
// Unless, it is time for the algorithm to terminate.
|
// Unless, it is time for the algorithm to terminate.
|
||||||
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
|
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
|
||||||
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
|
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
|
||||||
|
@ -195,19 +196,21 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
|
||||||
// computing sub-optimal edit-scripts between two lists.
|
// computing sub-optimal edit-scripts between two lists.
|
||||||
//
|
//
|
||||||
// The algorithm is approximately as follows:
|
// The algorithm is approximately as follows:
|
||||||
// • Searching for differences switches back-and-forth between
|
// - Searching for differences switches back-and-forth between
|
||||||
// a search that starts at the beginning (the top-left corner), and
|
// a search that starts at the beginning (the top-left corner), and
|
||||||
// a search that starts at the end (the bottom-right corner). The goal of
|
// a search that starts at the end (the bottom-right corner).
|
||||||
// the search is connect with the search from the opposite corner.
|
// The goal of the search is connect with the search
|
||||||
// • As we search, we build a path in a greedy manner, where the first
|
// from the opposite corner.
|
||||||
// match seen is added to the path (this is sub-optimal, but provides a
|
// - As we search, we build a path in a greedy manner,
|
||||||
// decent result in practice). When matches are found, we try the next pair
|
// where the first match seen is added to the path (this is sub-optimal,
|
||||||
// of symbols in the lists and follow all matches as far as possible.
|
// but provides a decent result in practice). When matches are found,
|
||||||
// • When searching for matches, we search along a diagonal going through
|
// we try the next pair of symbols in the lists and follow all matches
|
||||||
// through the "frontier" point. If no matches are found, we advance the
|
// as far as possible.
|
||||||
// frontier towards the opposite corner.
|
// - When searching for matches, we search along a diagonal going through
|
||||||
// • This algorithm terminates when either the X coordinates or the
|
// through the "frontier" point. If no matches are found,
|
||||||
// Y coordinates of the forward and reverse frontier points ever intersect.
|
// we advance the frontier towards the opposite corner.
|
||||||
|
// - This algorithm terminates when either the X coordinates or the
|
||||||
|
// Y coordinates of the forward and reverse frontier points ever intersect.
|
||||||
|
|
||||||
// This algorithm is correct even if searching only in the forward direction
|
// This algorithm is correct even if searching only in the forward direction
|
||||||
// or in the reverse direction. We do both because it is commonly observed
|
// or in the reverse direction. We do both because it is commonly observed
|
||||||
|
@ -389,6 +392,7 @@ type point struct{ X, Y int }
|
||||||
func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
|
func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
|
||||||
|
|
||||||
// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
|
// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
|
||||||
|
//
|
||||||
// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
|
// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
|
||||||
func zigzag(x int) int {
|
func zigzag(x int) int {
|
||||||
if x&1 != 0 {
|
if x&1 != 0 {
|
||||||
|
|
48
vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
generated
vendored
48
vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
generated
vendored
|
@ -1,48 +0,0 @@
|
||||||
// Copyright 2017, The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package value
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IsZero reports whether v is the zero value.
|
|
||||||
// This does not rely on Interface and so can be used on unexported fields.
|
|
||||||
func IsZero(v reflect.Value) bool {
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
return v.Bool() == false
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
return v.Int() == 0
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
|
||||||
return v.Uint() == 0
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return math.Float64bits(v.Float()) == 0
|
|
||||||
case reflect.Complex64, reflect.Complex128:
|
|
||||||
return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0
|
|
||||||
case reflect.String:
|
|
||||||
return v.String() == ""
|
|
||||||
case reflect.UnsafePointer:
|
|
||||||
return v.Pointer() == 0
|
|
||||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
|
|
||||||
return v.IsNil()
|
|
||||||
case reflect.Array:
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
if !IsZero(v.Index(i)) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
case reflect.Struct:
|
|
||||||
for i := 0; i < v.NumField(); i++ {
|
|
||||||
if !IsZero(v.Field(i)) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
10
vendor/github.com/google/go-cmp/cmp/options.go
generated
vendored
10
vendor/github.com/google/go-cmp/cmp/options.go
generated
vendored
|
@ -33,6 +33,7 @@ type Option interface {
|
||||||
}
|
}
|
||||||
|
|
||||||
// applicableOption represents the following types:
|
// applicableOption represents the following types:
|
||||||
|
//
|
||||||
// Fundamental: ignore | validator | *comparer | *transformer
|
// Fundamental: ignore | validator | *comparer | *transformer
|
||||||
// Grouping: Options
|
// Grouping: Options
|
||||||
type applicableOption interface {
|
type applicableOption interface {
|
||||||
|
@ -43,6 +44,7 @@ type applicableOption interface {
|
||||||
}
|
}
|
||||||
|
|
||||||
// coreOption represents the following types:
|
// coreOption represents the following types:
|
||||||
|
//
|
||||||
// Fundamental: ignore | validator | *comparer | *transformer
|
// Fundamental: ignore | validator | *comparer | *transformer
|
||||||
// Filters: *pathFilter | *valuesFilter
|
// Filters: *pathFilter | *valuesFilter
|
||||||
type coreOption interface {
|
type coreOption interface {
|
||||||
|
@ -336,9 +338,9 @@ func (tr transformer) String() string {
|
||||||
// both implement T.
|
// both implement T.
|
||||||
//
|
//
|
||||||
// The equality function must be:
|
// The equality function must be:
|
||||||
// • Symmetric: equal(x, y) == equal(y, x)
|
// - Symmetric: equal(x, y) == equal(y, x)
|
||||||
// • Deterministic: equal(x, y) == equal(x, y)
|
// - Deterministic: equal(x, y) == equal(x, y)
|
||||||
// • Pure: equal(x, y) does not modify x or y
|
// - Pure: equal(x, y) does not modify x or y
|
||||||
func Comparer(f interface{}) Option {
|
func Comparer(f interface{}) Option {
|
||||||
v := reflect.ValueOf(f)
|
v := reflect.ValueOf(f)
|
||||||
if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
|
if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
|
||||||
|
@ -430,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Result represents the comparison result for a single node and
|
// Result represents the comparison result for a single node and
|
||||||
// is provided by cmp when calling Result (see Reporter).
|
// is provided by cmp when calling Report (see Reporter).
|
||||||
type Result struct {
|
type Result struct {
|
||||||
_ [0]func() // Make Result incomparable
|
_ [0]func() // Make Result incomparable
|
||||||
flags resultFlags
|
flags resultFlags
|
||||||
|
|
20
vendor/github.com/google/go-cmp/cmp/path.go
generated
vendored
20
vendor/github.com/google/go-cmp/cmp/path.go
generated
vendored
|
@ -41,13 +41,13 @@ type PathStep interface {
|
||||||
// The type of each valid value is guaranteed to be identical to Type.
|
// The type of each valid value is guaranteed to be identical to Type.
|
||||||
//
|
//
|
||||||
// In some cases, one or both may be invalid or have restrictions:
|
// In some cases, one or both may be invalid or have restrictions:
|
||||||
// • For StructField, both are not interface-able if the current field
|
// - For StructField, both are not interface-able if the current field
|
||||||
// is unexported and the struct type is not explicitly permitted by
|
// is unexported and the struct type is not explicitly permitted by
|
||||||
// an Exporter to traverse unexported fields.
|
// an Exporter to traverse unexported fields.
|
||||||
// • For SliceIndex, one may be invalid if an element is missing from
|
// - For SliceIndex, one may be invalid if an element is missing from
|
||||||
// either the x or y slice.
|
// either the x or y slice.
|
||||||
// • For MapIndex, one may be invalid if an entry is missing from
|
// - For MapIndex, one may be invalid if an entry is missing from
|
||||||
// either the x or y map.
|
// either the x or y map.
|
||||||
//
|
//
|
||||||
// The provided values must not be mutated.
|
// The provided values must not be mutated.
|
||||||
Values() (vx, vy reflect.Value)
|
Values() (vx, vy reflect.Value)
|
||||||
|
@ -94,6 +94,7 @@ func (pa Path) Index(i int) PathStep {
|
||||||
// The simplified path only contains struct field accesses.
|
// The simplified path only contains struct field accesses.
|
||||||
//
|
//
|
||||||
// For example:
|
// For example:
|
||||||
|
//
|
||||||
// MyMap.MySlices.MyField
|
// MyMap.MySlices.MyField
|
||||||
func (pa Path) String() string {
|
func (pa Path) String() string {
|
||||||
var ss []string
|
var ss []string
|
||||||
|
@ -108,6 +109,7 @@ func (pa Path) String() string {
|
||||||
// GoString returns the path to a specific node using Go syntax.
|
// GoString returns the path to a specific node using Go syntax.
|
||||||
//
|
//
|
||||||
// For example:
|
// For example:
|
||||||
|
//
|
||||||
// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
|
// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
|
||||||
func (pa Path) GoString() string {
|
func (pa Path) GoString() string {
|
||||||
var ssPre, ssPost []string
|
var ssPre, ssPost []string
|
||||||
|
@ -159,7 +161,7 @@ func (ps pathStep) String() string {
|
||||||
if ps.typ == nil {
|
if ps.typ == nil {
|
||||||
return "<nil>"
|
return "<nil>"
|
||||||
}
|
}
|
||||||
s := ps.typ.String()
|
s := value.TypeString(ps.typ, false)
|
||||||
if s == "" || strings.ContainsAny(s, "{}\n") {
|
if s == "" || strings.ContainsAny(s, "{}\n") {
|
||||||
return "root" // Type too simple or complex to print
|
return "root" // Type too simple or complex to print
|
||||||
}
|
}
|
||||||
|
@ -282,7 +284,7 @@ type typeAssertion struct {
|
||||||
|
|
||||||
func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
|
func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
|
||||||
func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
|
func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
|
||||||
func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
|
func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }
|
||||||
|
|
||||||
// Transform is a transformation from the parent type to the current type.
|
// Transform is a transformation from the parent type to the current type.
|
||||||
type Transform struct{ *transform }
|
type Transform struct{ *transform }
|
||||||
|
|
10
vendor/github.com/google/go-cmp/cmp/report_compare.go
generated
vendored
10
vendor/github.com/google/go-cmp/cmp/report_compare.go
generated
vendored
|
@ -7,8 +7,6 @@ package cmp
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
|
||||||
"github.com/google/go-cmp/cmp/internal/value"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// numContextRecords is the number of surrounding equal records to print.
|
// numContextRecords is the number of surrounding equal records to print.
|
||||||
|
@ -117,7 +115,7 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out
|
||||||
|
|
||||||
// For leaf nodes, format the value based on the reflect.Values alone.
|
// For leaf nodes, format the value based on the reflect.Values alone.
|
||||||
// As a special case, treat equal []byte as a leaf nodes.
|
// As a special case, treat equal []byte as a leaf nodes.
|
||||||
isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == reflect.TypeOf(byte(0))
|
isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
|
||||||
isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
|
isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
|
||||||
if v.MaxDepth == 0 || isEqualBytes {
|
if v.MaxDepth == 0 || isEqualBytes {
|
||||||
switch opts.DiffMode {
|
switch opts.DiffMode {
|
||||||
|
@ -248,11 +246,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, pt
|
||||||
var isZero bool
|
var isZero bool
|
||||||
switch opts.DiffMode {
|
switch opts.DiffMode {
|
||||||
case diffIdentical:
|
case diffIdentical:
|
||||||
isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY)
|
isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero()
|
||||||
case diffRemoved:
|
case diffRemoved:
|
||||||
isZero = value.IsZero(r.Value.ValueX)
|
isZero = r.Value.ValueX.IsZero()
|
||||||
case diffInserted:
|
case diffInserted:
|
||||||
isZero = value.IsZero(r.Value.ValueY)
|
isZero = r.Value.ValueY.IsZero()
|
||||||
}
|
}
|
||||||
if isZero {
|
if isZero {
|
||||||
continue
|
continue
|
||||||
|
|
11
vendor/github.com/google/go-cmp/cmp/report_reflect.go
generated
vendored
11
vendor/github.com/google/go-cmp/cmp/report_reflect.go
generated
vendored
|
@ -16,6 +16,13 @@ import (
|
||||||
"github.com/google/go-cmp/cmp/internal/value"
|
"github.com/google/go-cmp/cmp/internal/value"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
anyType = reflect.TypeOf((*interface{})(nil)).Elem()
|
||||||
|
stringType = reflect.TypeOf((*string)(nil)).Elem()
|
||||||
|
bytesType = reflect.TypeOf((*[]byte)(nil)).Elem()
|
||||||
|
byteType = reflect.TypeOf((*byte)(nil)).Elem()
|
||||||
|
)
|
||||||
|
|
||||||
type formatValueOptions struct {
|
type formatValueOptions struct {
|
||||||
// AvoidStringer controls whether to avoid calling custom stringer
|
// AvoidStringer controls whether to avoid calling custom stringer
|
||||||
// methods like error.Error or fmt.Stringer.String.
|
// methods like error.Error or fmt.Stringer.String.
|
||||||
|
@ -184,7 +191,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
|
||||||
}
|
}
|
||||||
for i := 0; i < v.NumField(); i++ {
|
for i := 0; i < v.NumField(); i++ {
|
||||||
vv := v.Field(i)
|
vv := v.Field(i)
|
||||||
if value.IsZero(vv) {
|
if vv.IsZero() {
|
||||||
continue // Elide fields with zero values
|
continue // Elide fields with zero values
|
||||||
}
|
}
|
||||||
if len(list) == maxLen {
|
if len(list) == maxLen {
|
||||||
|
@ -205,7 +212,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check whether this is a []byte of text data.
|
// Check whether this is a []byte of text data.
|
||||||
if t.Elem() == reflect.TypeOf(byte(0)) {
|
if t.Elem() == byteType {
|
||||||
b := v.Bytes()
|
b := v.Bytes()
|
||||||
isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
|
isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
|
||||||
if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
|
if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
|
||||||
|
|
25
vendor/github.com/google/go-cmp/cmp/report_slices.go
generated
vendored
25
vendor/github.com/google/go-cmp/cmp/report_slices.go
generated
vendored
|
@ -104,7 +104,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||||
case t.Kind() == reflect.String:
|
case t.Kind() == reflect.String:
|
||||||
sx, sy = vx.String(), vy.String()
|
sx, sy = vx.String(), vy.String()
|
||||||
isString = true
|
isString = true
|
||||||
case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)):
|
case t.Kind() == reflect.Slice && t.Elem() == byteType:
|
||||||
sx, sy = string(vx.Bytes()), string(vy.Bytes())
|
sx, sy = string(vx.Bytes()), string(vy.Bytes())
|
||||||
isString = true
|
isString = true
|
||||||
case t.Kind() == reflect.Array:
|
case t.Kind() == reflect.Array:
|
||||||
|
@ -147,7 +147,10 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||||
})
|
})
|
||||||
efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
|
efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
|
||||||
efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
|
efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
|
||||||
isPureLinedText = efficiencyLines < 4*efficiencyBytes
|
quotedLength := len(strconv.Quote(sx + sy))
|
||||||
|
unquotedLength := len(sx) + len(sy)
|
||||||
|
escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength)
|
||||||
|
isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -171,12 +174,13 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||||
// differences in a string literal. This format is more readable,
|
// differences in a string literal. This format is more readable,
|
||||||
// but has edge-cases where differences are visually indistinguishable.
|
// but has edge-cases where differences are visually indistinguishable.
|
||||||
// This format is avoided under the following conditions:
|
// This format is avoided under the following conditions:
|
||||||
// • A line starts with `"""`
|
// - A line starts with `"""`
|
||||||
// • A line starts with "..."
|
// - A line starts with "..."
|
||||||
// • A line contains non-printable characters
|
// - A line contains non-printable characters
|
||||||
// • Adjacent different lines differ only by whitespace
|
// - Adjacent different lines differ only by whitespace
|
||||||
//
|
//
|
||||||
// For example:
|
// For example:
|
||||||
|
//
|
||||||
// """
|
// """
|
||||||
// ... // 3 identical lines
|
// ... // 3 identical lines
|
||||||
// foo
|
// foo
|
||||||
|
@ -231,7 +235,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||||
var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
|
var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
|
||||||
switch t.Kind() {
|
switch t.Kind() {
|
||||||
case reflect.String:
|
case reflect.String:
|
||||||
if t != reflect.TypeOf(string("")) {
|
if t != stringType {
|
||||||
out = opts.FormatType(t, out)
|
out = opts.FormatType(t, out)
|
||||||
}
|
}
|
||||||
case reflect.Slice:
|
case reflect.Slice:
|
||||||
|
@ -326,12 +330,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||||
switch t.Kind() {
|
switch t.Kind() {
|
||||||
case reflect.String:
|
case reflect.String:
|
||||||
out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
|
out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
|
||||||
if t != reflect.TypeOf(string("")) {
|
if t != stringType {
|
||||||
out = opts.FormatType(t, out)
|
out = opts.FormatType(t, out)
|
||||||
}
|
}
|
||||||
case reflect.Slice:
|
case reflect.Slice:
|
||||||
out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
|
out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
|
||||||
if t != reflect.TypeOf([]byte(nil)) {
|
if t != bytesType {
|
||||||
out = opts.FormatType(t, out)
|
out = opts.FormatType(t, out)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -446,7 +450,6 @@ func (opts formatOptions) formatDiffSlice(
|
||||||
// {NumIdentical: 3},
|
// {NumIdentical: 3},
|
||||||
// {NumInserted: 1},
|
// {NumInserted: 1},
|
||||||
// ]
|
// ]
|
||||||
//
|
|
||||||
func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
|
func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
|
||||||
var prevMode byte
|
var prevMode byte
|
||||||
lastStats := func(mode byte) *diffStats {
|
lastStats := func(mode byte) *diffStats {
|
||||||
|
@ -503,7 +506,6 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats)
|
||||||
// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
|
// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
|
||||||
// {NumIdentical: 63},
|
// {NumIdentical: 63},
|
||||||
// ]
|
// ]
|
||||||
//
|
|
||||||
func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
|
func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
|
||||||
groups, groupsOrig := groups[:0], groups
|
groups, groupsOrig := groups[:0], groups
|
||||||
for i, ds := range groupsOrig {
|
for i, ds := range groupsOrig {
|
||||||
|
@ -548,7 +550,6 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat
|
||||||
// {NumRemoved: 9},
|
// {NumRemoved: 9},
|
||||||
// {NumIdentical: 64}, // incremented by 10
|
// {NumIdentical: 64}, // incremented by 10
|
||||||
// ]
|
// ]
|
||||||
//
|
|
||||||
func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
|
func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
|
||||||
var ix, iy int // indexes into sequence x and y
|
var ix, iy int // indexes into sequence x and y
|
||||||
for i, ds := range groups {
|
for i, ds := range groups {
|
||||||
|
|
1
vendor/github.com/google/go-cmp/cmp/report_text.go
generated
vendored
1
vendor/github.com/google/go-cmp/cmp/report_text.go
generated
vendored
|
@ -393,6 +393,7 @@ func (s diffStats) Append(ds diffStats) diffStats {
|
||||||
// String prints a humanly-readable summary of coalesced records.
|
// String prints a humanly-readable summary of coalesced records.
|
||||||
//
|
//
|
||||||
// Example:
|
// Example:
|
||||||
|
//
|
||||||
// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
|
// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
|
||||||
func (s diffStats) String() string {
|
func (s diffStats) String() string {
|
||||||
var ss []string
|
var ss []string
|
||||||
|
|
7
vendor/github.com/urfave/cli/v2/flag_bool.go
generated
vendored
7
vendor/github.com/urfave/cli/v2/flag_bool.go
generated
vendored
|
@ -51,8 +51,13 @@ func (f *BoolFlag) Apply(set *flag.FlagSet) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
f.Value = valBool
|
f.Value = valBool
|
||||||
f.HasBeenSet = true
|
} else {
|
||||||
|
// empty value implies that the env is defined but set to empty string, we have to assume that this is
|
||||||
|
// what the user wants. If user doesnt want this then the env needs to be deleted or the flag removed from
|
||||||
|
// file
|
||||||
|
f.Value = false
|
||||||
}
|
}
|
||||||
|
f.HasBeenSet = true
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, name := range f.Names() {
|
for _, name := range f.Names() {
|
||||||
|
|
2
vendor/github.com/urfave/cli/v2/flag_generic.go
generated
vendored
2
vendor/github.com/urfave/cli/v2/flag_generic.go
generated
vendored
|
@ -50,7 +50,7 @@ func (f *GenericFlag) GetEnvVars() []string {
|
||||||
|
|
||||||
// Apply takes the flagset and calls Set on the generic flag with the value
|
// Apply takes the flagset and calls Set on the generic flag with the value
|
||||||
// provided by the user for parsing by the flag
|
// provided by the user for parsing by the flag
|
||||||
func (f GenericFlag) Apply(set *flag.FlagSet) error {
|
func (f *GenericFlag) Apply(set *flag.FlagSet) error {
|
||||||
if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found {
|
if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found {
|
||||||
if val != "" {
|
if val != "" {
|
||||||
if err := f.Value.Set(val); err != nil {
|
if err := f.Value.Set(val); err != nil {
|
||||||
|
|
26
vendor/github.com/urfave/cli/v2/godoc-current.txt
generated
vendored
26
vendor/github.com/urfave/cli/v2/godoc-current.txt
generated
vendored
|
@ -5,24 +5,24 @@ line Go applications. cli is designed to be easy to understand and write,
|
||||||
the most simple cli application can be written as follows:
|
the most simple cli application can be written as follows:
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
(&cli.App{}).Run(os.Args)
|
(&cli.App{}).Run(os.Args)
|
||||||
}
|
}
|
||||||
|
|
||||||
Of course this application does not do much, so let's make this an actual
|
Of course this application does not do much, so let's make this an actual
|
||||||
application:
|
application:
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
app := &cli.App{
|
app := &cli.App{
|
||||||
Name: "greet",
|
Name: "greet",
|
||||||
Usage: "say a greeting",
|
Usage: "say a greeting",
|
||||||
Action: func(c *cli.Context) error {
|
Action: func(c *cli.Context) error {
|
||||||
fmt.Println("Greetings")
|
fmt.Println("Greetings")
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
app.Run(os.Args)
|
app.Run(os.Args)
|
||||||
}
|
}
|
||||||
|
|
||||||
VARIABLES
|
VARIABLES
|
||||||
|
|
||||||
|
@ -1073,7 +1073,7 @@ type GenericFlag struct {
|
||||||
}
|
}
|
||||||
GenericFlag is a flag with type Generic
|
GenericFlag is a flag with type Generic
|
||||||
|
|
||||||
func (f GenericFlag) Apply(set *flag.FlagSet) error
|
func (f *GenericFlag) Apply(set *flag.FlagSet) error
|
||||||
Apply takes the flagset and calls Set on the generic flag with the value
|
Apply takes the flagset and calls Set on the generic flag with the value
|
||||||
provided by the user for parsing by the flag
|
provided by the user for parsing by the flag
|
||||||
|
|
||||||
|
|
3
vendor/golang.org/x/net/http2/server.go
generated
vendored
3
vendor/golang.org/x/net/http2/server.go
generated
vendored
|
@ -1371,6 +1371,9 @@ func (sc *serverConn) startGracefulShutdownInternal() {
|
||||||
func (sc *serverConn) goAway(code ErrCode) {
|
func (sc *serverConn) goAway(code ErrCode) {
|
||||||
sc.serveG.check()
|
sc.serveG.check()
|
||||||
if sc.inGoAway {
|
if sc.inGoAway {
|
||||||
|
if sc.goAwayCode == ErrCodeNo {
|
||||||
|
sc.goAwayCode = code
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
sc.inGoAway = true
|
sc.inGoAway = true
|
||||||
|
|
2
vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
generated
vendored
2
vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
generated
vendored
|
@ -61,7 +61,7 @@ func PtraceGetFsBase(pid int, fsbase *int64) (err error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
|
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
|
||||||
ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)}
|
ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)}
|
||||||
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
|
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
|
||||||
return int(ioDesc.Len), err
|
return int(ioDesc.Len), err
|
||||||
}
|
}
|
||||||
|
|
2
vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
generated
vendored
2
vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
generated
vendored
|
@ -61,7 +61,7 @@ func PtraceGetFsBase(pid int, fsbase *int64) (err error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
|
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
|
||||||
ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)}
|
ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)}
|
||||||
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
|
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
|
||||||
return int(ioDesc.Len), err
|
return int(ioDesc.Len), err
|
||||||
}
|
}
|
||||||
|
|
2
vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
generated
vendored
2
vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
generated
vendored
|
@ -57,7 +57,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
|
||||||
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
|
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
|
||||||
|
|
||||||
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
|
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
|
||||||
ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)}
|
ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)}
|
||||||
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
|
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
|
||||||
return int(ioDesc.Len), err
|
return int(ioDesc.Len), err
|
||||||
}
|
}
|
||||||
|
|
2
vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go
generated
vendored
2
vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go
generated
vendored
|
@ -57,7 +57,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
|
||||||
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
|
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
|
||||||
|
|
||||||
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
|
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
|
||||||
ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)}
|
ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)}
|
||||||
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
|
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
|
||||||
return int(ioDesc.Len), err
|
return int(ioDesc.Len), err
|
||||||
}
|
}
|
||||||
|
|
2
vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go
generated
vendored
2
vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go
generated
vendored
|
@ -57,7 +57,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
|
||||||
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
|
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
|
||||||
|
|
||||||
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
|
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
|
||||||
ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)}
|
ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)}
|
||||||
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
|
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
|
||||||
return int(ioDesc.Len), err
|
return int(ioDesc.Len), err
|
||||||
}
|
}
|
||||||
|
|
27
vendor/golang.org/x/sys/unix/syscall_linux.go
generated
vendored
27
vendor/golang.org/x/sys/unix/syscall_linux.go
generated
vendored
|
@ -1892,17 +1892,28 @@ func PrctlRetInt(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uint
|
||||||
return int(ret), nil
|
return int(ret), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// issue 1435.
|
|
||||||
// On linux Setuid and Setgid only affects the current thread, not the process.
|
|
||||||
// This does not match what most callers expect so we must return an error
|
|
||||||
// here rather than letting the caller think that the call succeeded.
|
|
||||||
|
|
||||||
func Setuid(uid int) (err error) {
|
func Setuid(uid int) (err error) {
|
||||||
return EOPNOTSUPP
|
return syscall.Setuid(uid)
|
||||||
}
|
}
|
||||||
|
|
||||||
func Setgid(uid int) (err error) {
|
func Setgid(gid int) (err error) {
|
||||||
return EOPNOTSUPP
|
return syscall.Setgid(gid)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Setreuid(ruid, euid int) (err error) {
|
||||||
|
return syscall.Setreuid(ruid, euid)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Setregid(rgid, egid int) (err error) {
|
||||||
|
return syscall.Setregid(rgid, egid)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Setresuid(ruid, euid, suid int) (err error) {
|
||||||
|
return syscall.Setresuid(ruid, euid, suid)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Setresgid(rgid, egid, sgid int) (err error) {
|
||||||
|
return syscall.Setresgid(rgid, egid, sgid)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetfsgidRetGid sets fsgid for current thread and returns previous fsgid set.
|
// SetfsgidRetGid sets fsgid for current thread and returns previous fsgid set.
|
||||||
|
|
4
vendor/golang.org/x/sys/unix/syscall_linux_386.go
generated
vendored
4
vendor/golang.org/x/sys/unix/syscall_linux_386.go
generated
vendored
|
@ -41,10 +41,6 @@ func setTimeval(sec, usec int64) Timeval {
|
||||||
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
|
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
|
||||||
//sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32
|
//sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32
|
||||||
//sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32
|
//sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32
|
||||||
//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32
|
|
||||||
//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32
|
|
||||||
//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32
|
|
||||||
//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32
|
|
||||||
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
|
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
|
||||||
//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
|
//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
|
||||||
//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
|
//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
|
||||||
|
|
4
vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
generated
vendored
4
vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
generated
vendored
|
@ -46,11 +46,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
|
||||||
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
|
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
|
||||||
//sys setfsgid(gid int) (prev int, err error)
|
//sys setfsgid(gid int) (prev int, err error)
|
||||||
//sys setfsuid(uid int) (prev int, err error)
|
//sys setfsuid(uid int) (prev int, err error)
|
||||||
//sysnb Setregid(rgid int, egid int) (err error)
|
|
||||||
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
|
|
||||||
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
|
|
||||||
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
|
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
|
||||||
//sysnb Setreuid(ruid int, euid int) (err error)
|
|
||||||
//sys Shutdown(fd int, how int) (err error)
|
//sys Shutdown(fd int, how int) (err error)
|
||||||
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
|
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
|
||||||
|
|
||||||
|
|
4
vendor/golang.org/x/sys/unix/syscall_linux_arm.go
generated
vendored
4
vendor/golang.org/x/sys/unix/syscall_linux_arm.go
generated
vendored
|
@ -62,10 +62,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
|
||||||
//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT
|
//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT
|
||||||
//sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32
|
//sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32
|
||||||
//sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32
|
//sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32
|
||||||
//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32
|
|
||||||
//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32
|
|
||||||
//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32
|
|
||||||
//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32
|
|
||||||
//sys Shutdown(fd int, how int) (err error)
|
//sys Shutdown(fd int, how int) (err error)
|
||||||
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
|
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
|
||||||
//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
|
//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
|
||||||
|
|
4
vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
generated
vendored
4
vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
generated
vendored
|
@ -39,11 +39,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
|
||||||
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
|
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
|
||||||
//sys setfsgid(gid int) (prev int, err error)
|
//sys setfsgid(gid int) (prev int, err error)
|
||||||
//sys setfsuid(uid int) (prev int, err error)
|
//sys setfsuid(uid int) (prev int, err error)
|
||||||
//sysnb Setregid(rgid int, egid int) (err error)
|
|
||||||
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
|
|
||||||
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
|
|
||||||
//sysnb setrlimit(resource int, rlim *Rlimit) (err error)
|
//sysnb setrlimit(resource int, rlim *Rlimit) (err error)
|
||||||
//sysnb Setreuid(ruid int, euid int) (err error)
|
|
||||||
//sys Shutdown(fd int, how int) (err error)
|
//sys Shutdown(fd int, how int) (err error)
|
||||||
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
|
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
|
||||||
|
|
||||||
|
|
4
vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
generated
vendored
4
vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
generated
vendored
|
@ -34,10 +34,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
|
||||||
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
|
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
|
||||||
//sys setfsgid(gid int) (prev int, err error)
|
//sys setfsgid(gid int) (prev int, err error)
|
||||||
//sys setfsuid(uid int) (prev int, err error)
|
//sys setfsuid(uid int) (prev int, err error)
|
||||||
//sysnb Setregid(rgid int, egid int) (err error)
|
|
||||||
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
|
|
||||||
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
|
|
||||||
//sysnb Setreuid(ruid int, euid int) (err error)
|
|
||||||
//sys Shutdown(fd int, how int) (err error)
|
//sys Shutdown(fd int, how int) (err error)
|
||||||
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
|
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
|
||||||
|
|
||||||
|
|
4 vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go generated vendored
@@ -37,11 +37,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
 //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
 //sys setfsgid(gid int) (prev int, err error)
 //sys setfsuid(uid int) (prev int, err error)
-//sysnb Setregid(rgid int, egid int) (err error)
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
-//sysnb Setresuid(ruid int, euid int, suid int) (err error)
 //sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
-//sysnb Setreuid(ruid int, euid int) (err error)
 //sys Shutdown(fd int, how int) (err error)
 //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
 //sys Statfs(path string, buf *Statfs_t) (err error)

4 vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go generated vendored
@@ -32,10 +32,6 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
 //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
 //sys setfsgid(gid int) (prev int, err error)
 //sys setfsuid(uid int) (prev int, err error)
-//sysnb Setregid(rgid int, egid int) (err error)
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
-//sysnb Setresuid(ruid int, euid int, suid int) (err error)
-//sysnb Setreuid(ruid int, euid int) (err error)
 //sys Shutdown(fd int, how int) (err error)
 //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
 //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)

4 vendor/golang.org/x/sys/unix/syscall_linux_ppc.go generated vendored
@@ -34,10 +34,6 @@ import (
 //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
 //sys setfsgid(gid int) (prev int, err error)
 //sys setfsuid(uid int) (prev int, err error)
-//sysnb Setregid(rgid int, egid int) (err error)
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
-//sysnb Setresuid(ruid int, euid int, suid int) (err error)
-//sysnb Setreuid(ruid int, euid int) (err error)
 //sys Shutdown(fd int, how int) (err error)
 //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
 //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64

4 vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go generated vendored
@@ -34,11 +34,7 @@ package unix
 //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
 //sys setfsgid(gid int) (prev int, err error)
 //sys setfsuid(uid int) (prev int, err error)
-//sysnb Setregid(rgid int, egid int) (err error)
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
-//sysnb Setresuid(ruid int, euid int, suid int) (err error)
 //sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
-//sysnb Setreuid(ruid int, euid int) (err error)
 //sys Shutdown(fd int, how int) (err error)
 //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
 //sys Stat(path string, stat *Stat_t) (err error)

4 vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go generated vendored
@@ -38,11 +38,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
 //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
 //sys setfsgid(gid int) (prev int, err error)
 //sys setfsuid(uid int) (prev int, err error)
-//sysnb Setregid(rgid int, egid int) (err error)
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
-//sysnb Setresuid(ruid int, euid int, suid int) (err error)
 //sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
-//sysnb Setreuid(ruid int, euid int) (err error)
 //sys Shutdown(fd int, how int) (err error)
 //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)

4 vendor/golang.org/x/sys/unix/syscall_linux_s390x.go generated vendored
@@ -34,11 +34,7 @@ import (
 //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
 //sys setfsgid(gid int) (prev int, err error)
 //sys setfsuid(uid int) (prev int, err error)
-//sysnb Setregid(rgid int, egid int) (err error)
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
-//sysnb Setresuid(ruid int, euid int, suid int) (err error)
 //sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
-//sysnb Setreuid(ruid int, euid int) (err error)
 //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
 //sys Stat(path string, stat *Stat_t) (err error)
 //sys Statfs(path string, buf *Statfs_t) (err error)

4 vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go generated vendored
@@ -31,11 +31,7 @@ package unix
 //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
 //sys setfsgid(gid int) (prev int, err error)
 //sys setfsuid(uid int) (prev int, err error)
-//sysnb Setregid(rgid int, egid int) (err error)
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
-//sysnb Setresuid(ruid int, euid int, suid int) (err error)
 //sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
-//sysnb Setreuid(ruid int, euid int) (err error)
 //sys Shutdown(fd int, how int) (err error)
 //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
 //sys Stat(path string, stat *Stat_t) (err error)

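Note: the //sys and //sysnb directives removed above are what mksyscall reads to emit the per-architecture wrappers in the zsyscall_linux_*.go files that follow, which is why each dropped directive has a matching dropped wrapper below. As a rough, hedged illustration only (not the generated code itself; the helper name setreuid and the use of unix.SYS_SETREUID are assumptions for linux/amd64), a //sysnb directive corresponds to a thin RawSyscall wrapper along these lines:

// Hypothetical sketch of the wrapper shape a //sysnb directive expands to;
// the real generated code is shown in the zsyscall_linux_*.go hunks below.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// setreuid mirrors the generated pattern: one RawSyscall, errno mapped to error.
func setreuid(ruid, euid int) error {
	_, _, errno := unix.RawSyscall(unix.SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
	if errno != 0 {
		return errno
	}
	return nil
}

func main() {
	// Re-applying the current IDs is normally a harmless no-op.
	fmt.Println(setreuid(unix.Getuid(), unix.Geteuid()))
}
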
40 vendor/golang.org/x/sys/unix/zsyscall_linux_386.go generated vendored
@@ -287,46 +287,6 @@ func setfsuid(uid int) (prev int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setregid(rgid int, egid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREGID32, uintptr(rgid), uintptr(egid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESGID32, uintptr(rgid), uintptr(egid), uintptr(sgid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESUID32, uintptr(ruid), uintptr(euid), uintptr(suid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setreuid(ruid int, euid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREUID32, uintptr(ruid), uintptr(euid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) {
     r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags))
     n = int(r0)

40 vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go generated vendored
@@ -334,36 +334,6 @@ func setfsuid(uid int) (prev int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setregid(rgid int, egid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Setrlimit(resource int, rlim *Rlimit) (err error) {
     _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
     if e1 != 0 {
@@ -374,16 +344,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setreuid(ruid int, euid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Shutdown(fd int, how int) (err error) {
     _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
     if e1 != 0 {

40 vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go generated vendored
@@ -412,46 +412,6 @@ func setfsuid(uid int) (prev int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setregid(rgid int, egid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREGID32, uintptr(rgid), uintptr(egid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESGID32, uintptr(rgid), uintptr(egid), uintptr(sgid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESUID32, uintptr(ruid), uintptr(euid), uintptr(suid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setreuid(ruid int, euid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREUID32, uintptr(ruid), uintptr(euid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Shutdown(fd int, how int) (err error) {
     _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
     if e1 != 0 {

40 vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go generated vendored
@@ -289,36 +289,6 @@ func setfsuid(uid int) (prev int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setregid(rgid int, egid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func setrlimit(resource int, rlim *Rlimit) (err error) {
     _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
     if e1 != 0 {
@@ -329,16 +299,6 @@ func setrlimit(resource int, rlim *Rlimit) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setreuid(ruid int, euid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Shutdown(fd int, how int) (err error) {
     _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
     if e1 != 0 {

40 vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go generated vendored
@@ -223,46 +223,6 @@ func setfsuid(uid int) (prev int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setregid(rgid int, egid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setreuid(ruid int, euid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Shutdown(fd int, how int) (err error) {
     _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
     if e1 != 0 {

40 vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go generated vendored
@@ -248,46 +248,6 @@ func setfsuid(uid int) (prev int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setregid(rgid int, egid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setreuid(ruid int, euid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Shutdown(fd int, how int) (err error) {
     _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
     if e1 != 0 {

40 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go generated vendored
@@ -278,36 +278,6 @@ func setfsuid(uid int) (prev int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setregid(rgid int, egid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Setrlimit(resource int, rlim *Rlimit) (err error) {
     _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
     if e1 != 0 {
@@ -318,16 +288,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setreuid(ruid int, euid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Shutdown(fd int, how int) (err error) {
     _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
     if e1 != 0 {

40 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go generated vendored
@@ -278,36 +278,6 @@ func setfsuid(uid int) (prev int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setregid(rgid int, egid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Setrlimit(resource int, rlim *Rlimit) (err error) {
     _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
     if e1 != 0 {
@@ -318,16 +288,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setreuid(ruid int, euid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Shutdown(fd int, how int) (err error) {
     _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
     if e1 != 0 {

40 vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go generated vendored
@@ -248,46 +248,6 @@ func setfsuid(uid int) (prev int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setregid(rgid int, egid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setreuid(ruid int, euid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Shutdown(fd int, how int) (err error) {
     _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
     if e1 != 0 {

40 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go generated vendored
@@ -308,46 +308,6 @@ func setfsuid(uid int) (prev int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setregid(rgid int, egid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setreuid(ruid int, euid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Shutdown(fd int, how int) (err error) {
     _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
     if e1 != 0 {

40 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go generated vendored
@@ -349,36 +349,6 @@ func setfsuid(uid int) (prev int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setregid(rgid int, egid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Setrlimit(resource int, rlim *Rlimit) (err error) {
     _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
     if e1 != 0 {
@@ -389,16 +359,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setreuid(ruid int, euid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Shutdown(fd int, how int) (err error) {
     _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
     if e1 != 0 {

40 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go generated vendored
@@ -349,36 +349,6 @@ func setfsuid(uid int) (prev int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setregid(rgid int, egid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Setrlimit(resource int, rlim *Rlimit) (err error) {
     _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
     if e1 != 0 {
@@ -389,16 +359,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setreuid(ruid int, euid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Shutdown(fd int, how int) (err error) {
     _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
     if e1 != 0 {

40 vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go generated vendored
@@ -269,36 +269,6 @@ func setfsuid(uid int) (prev int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setregid(rgid int, egid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Setrlimit(resource int, rlim *Rlimit) (err error) {
     _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
     if e1 != 0 {
@@ -309,16 +279,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setreuid(ruid int, euid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Shutdown(fd int, how int) (err error) {
     _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
     if e1 != 0 {

40 vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go generated vendored
@@ -319,36 +319,6 @@ func setfsuid(uid int) (prev int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setregid(rgid int, egid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Setrlimit(resource int, rlim *Rlimit) (err error) {
     _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
     if e1 != 0 {
@@ -359,16 +329,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setreuid(ruid int, euid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) {
     r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags))
     n = int64(r0)

40 vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go generated vendored
@@ -329,36 +329,6 @@ func setfsuid(uid int) (prev int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setregid(rgid int, egid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Setrlimit(resource int, rlim *Rlimit) (err error) {
     _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
     if e1 != 0 {
@@ -369,16 +339,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Setreuid(ruid int, euid int) (err error) {
-    _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
-    if e1 != 0 {
-        err = errnoErr(e1)
-    }
-    return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Shutdown(fd int, how int) (err error) {
     _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
    if e1 != 0 {

17 vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go generated vendored
@@ -294,7 +294,7 @@ type PtraceLwpInfoStruct struct {
     Flags        int32
     Sigmask      Sigset_t
     Siglist      Sigset_t
-    Siginfo      __Siginfo
+    Siginfo      __PtraceSiginfo
     Tdname       [20]int8
     Child_pid    int32
     Syscall_code uint32
@@ -312,6 +312,17 @@ type __Siginfo struct {
     Value [4]byte
     _     [32]byte
 }
+
+type __PtraceSiginfo struct {
+    Signo  int32
+    Errno  int32
+    Code   int32
+    Pid    int32
+    Uid    uint32
+    Status int32
+    Addr   uintptr
+    Value  [4]byte
+    _      [32]byte
+}
 
 type Sigset_t struct {
     Val [4]uint32
@@ -350,8 +361,8 @@ type FpExtendedPrecision struct{}
 
 type PtraceIoDesc struct {
     Op   int32
-    Offs *byte
-    Addr *byte
+    Offs uintptr
+    Addr uintptr
     Len  uint32
 }

18 vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go generated vendored
@@ -291,7 +291,7 @@ type PtraceLwpInfoStruct struct {
     Flags        int32
     Sigmask      Sigset_t
     Siglist      Sigset_t
-    Siginfo      __Siginfo
+    Siginfo      __PtraceSiginfo
     Tdname       [20]int8
     Child_pid    int32
     Syscall_code uint32
@@ -310,6 +310,18 @@ type __Siginfo struct {
     _ [40]byte
 }
 
+type __PtraceSiginfo struct {
+    Signo  int32
+    Errno  int32
+    Code   int32
+    Pid    int32
+    Uid    uint32
+    Status int32
+    Addr   uintptr
+    Value  [8]byte
+    _      [40]byte
+}
+
 type Sigset_t struct {
     Val [4]uint32
 }
@@ -354,8 +366,8 @@ type FpExtendedPrecision struct{}
 
 type PtraceIoDesc struct {
     Op   int32
-    Offs *byte
-    Addr *byte
+    Offs uintptr
+    Addr uintptr
     Len  uint64
 }

18 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go generated vendored
@@ -293,7 +293,7 @@ type PtraceLwpInfoStruct struct {
     Flags        int32
     Sigmask      Sigset_t
     Siglist      Sigset_t
-    Siginfo      __Siginfo
+    Siginfo      __PtraceSiginfo
     Tdname       [20]int8
     Child_pid    int32
     Syscall_code uint32
@@ -312,6 +312,18 @@ type __Siginfo struct {
     _ [32]byte
 }
 
+type __PtraceSiginfo struct {
+    Signo  int32
+    Errno  int32
+    Code   int32
+    Pid    int32
+    Uid    uint32
+    Status int32
+    Addr   uintptr
+    Value  [4]byte
+    _      [32]byte
+}
+
 type Sigset_t struct {
     Val [4]uint32
 }
@@ -337,8 +349,8 @@ type FpExtendedPrecision struct {
 
 type PtraceIoDesc struct {
     Op   int32
-    Offs *byte
-    Addr *byte
+    Offs uintptr
+    Addr uintptr
     Len  uint32
 }

18 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go generated vendored
@@ -291,7 +291,7 @@ type PtraceLwpInfoStruct struct {
     Flags        int32
     Sigmask      Sigset_t
     Siglist      Sigset_t
-    Siginfo      __Siginfo
+    Siginfo      __PtraceSiginfo
     Tdname       [20]int8
     Child_pid    int32
     Syscall_code uint32
@@ -310,6 +310,18 @@ type __Siginfo struct {
     _ [40]byte
 }
 
+type __PtraceSiginfo struct {
+    Signo  int32
+    Errno  int32
+    Code   int32
+    Pid    int32
+    Uid    uint32
+    Status int32
+    Addr   uintptr
+    Value  [8]byte
+    _      [40]byte
+}
+
 type Sigset_t struct {
     Val [4]uint32
 }
@@ -334,8 +346,8 @@ type FpExtendedPrecision struct{}
 
 type PtraceIoDesc struct {
     Op   int32
-    Offs *byte
-    Addr *byte
+    Offs uintptr
+    Addr uintptr
     Len  uint64
 }

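Note: the FreeBSD hunks above change PtraceIoDesc.Offs and PtraceIoDesc.Addr from *byte to uintptr, so callers pass raw addresses rather than byte pointers. A minimal, self-contained sketch of a struct literal under the new layout; it uses a local copy of the struct (not the real unix.PtraceIoDesc, so it compiles on any GOOS) and placeholder field values, not a real ptrace request:

package main

import (
	"fmt"
	"unsafe"
)

// ptraceIoDesc mirrors the new field layout shown above (freebsd/amd64 variant
// with Len uint64); it is a local illustration, not the real unix type.
type ptraceIoDesc struct {
	Op   int32
	Offs uintptr
	Addr uintptr
	Len  uint64
}

func main() {
	buf := make([]byte, 16)
	d := ptraceIoDesc{
		Op:   1,      // placeholder request code
		Offs: 0x1000, // remote address in the traced process (placeholder)
		// Local buffer address; illustrative only. In real code, keep the
		// unsafe.Pointer conversion at the syscall call site.
		Addr: uintptr(unsafe.Pointer(&buf[0])),
		Len:  uint64(len(buf)),
	}
	fmt.Printf("%+v\n", d)
}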