Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git

Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

commit d0c364d93d
105 changed files with 2679 additions and 1157 deletions

README.md (10 lines changed)
@@ -533,6 +533,7 @@ The `/api/v1/export` endpoint should return the following response:
Extra labels may be added to all the imported time series by passing `extra_label=name=value` query args.
For example, `/api/put?extra_label=foo=bar` would add `{foo="bar"}` label to all the ingested metrics.


## Prometheus querying API usage

VictoriaMetrics supports the following handlers from [Prometheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/):
@@ -548,6 +549,8 @@ VictoriaMetrics supports the following handlers from [Prometheus querying API](h
* [/api/v1/targets](https://prometheus.io/docs/prometheus/latest/querying/api/#targets) - see [these docs](#how-to-scrape-prometheus-exporters-such-as-node-exporter) for more details.

These handlers can be queried from Prometheus-compatible clients such as Grafana or curl.
All the Prometheus querying API handlers can be prepended with `/prometheus` prefix. For example, both `/prometheus/api/v1/query` and `/api/v1/query` should work.


### Prometheus querying API enhancements

@@ -592,6 +595,8 @@ VictoriaMetrics supports the following Graphite APIs, which are needed for [Grap
* Metrics API - see [these docs](#graphite-metrics-api-usage).
* Tags API - see [these docs](#graphite-tags-api-usage).

All the Graphite handlers can be pre-pended with `/graphite` prefix. For example, both `/graphite/metrics/find` and `/metrics/find` should work.

VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://victoriametrics.github.io/MetricsQL.html).
For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster
and it is easier to use when migrating from Graphite to VictoriaMetrics.
@@ -599,8 +604,8 @@ and it is easier to use when migrating from Graphite to VictoriaMetrics.

### Graphite Render API usage

[VictoriaMetrics Enterprise](https://victoriametrics.com/enterprise.html) supports [Graphite Render API](https://graphite.readthedocs.io/en/stable/render_api.html) subset,
which is needed for [Graphite datasource in Grafana](https://grafana.com/docs/grafana/latest/datasources/graphite/).
[VictoriaMetrics Enterprise](https://victoriametrics.com/enterprise.html) supports [Graphite Render API](https://graphite.readthedocs.io/en/stable/render_api.html) subset
at `/render` endpoint. This subset is required for [Graphite datasource in Grafana](https://grafana.com/docs/grafana/latest/datasources/graphite/).


### Graphite Metrics API usage
@@ -997,6 +1002,7 @@ Note that it could be required to flush response cache after importing historica
### How to import data in Prometheus exposition format

VictoriaMetrics accepts data in [Prometheus exposition format](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format)
and in [OpenMetrics format](https://github.com/OpenObservability/OpenMetrics/blob/master/specification/OpenMetrics.md)
via `/api/v1/import/prometheus` path. For example, the following line imports a single line in Prometheus exposition format into VictoriaMetrics:

```bash
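The hunks above describe three related behaviours of the single-node ingestion path: the `/api/v1/import/prometheus` handler, the optional `/prometheus` URL prefix, and the `extra_label=name=value` query arg. The following self-contained Go sketch (not part of this commit) ties them together; the `localhost:8428` address and the sample metric are assumptions used only for illustration.

```go
// Minimal sketch: push one line in Prometheus exposition format to a
// single-node VictoriaMetrics instance, using the optional /prometheus
// path prefix and an extra_label query arg, as described in the hunks above.
// The address localhost:8428 is an assumption (the usual single-node port).
package main

import (
    "fmt"
    "log"
    "net/http"
    "strings"
)

func main() {
    // One sample in Prometheus exposition format: metric{labels} value
    line := `foo{bar="baz"} 123` + "\n"

    // extra_label=job=test asks VictoriaMetrics to attach {job="test"} to
    // every imported series; the /prometheus prefix is optional.
    url := "http://localhost:8428/prometheus/api/v1/import/prometheus?extra_label=job=test"

    resp, err := http.Post(url, "text/plain", strings.NewReader(line))
    if err != nil {
        log.Fatalf("cannot import metrics: %s", err)
    }
    defer resp.Body.Close()
    fmt.Println("import status:", resp.Status)
}
```

Per the README text above, the same request without the `/prometheus` prefix should behave identically, since both path forms are accepted.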
app/victoria-metrics/multiarch/Dockerfile (new file, 12 lines)
@@ -0,0 +1,12 @@
# See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
ARG certs_image
ARG root_image
FROM $certs_image as certs
RUN apk --update --no-cache add ca-certificates

FROM $root_image
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
EXPOSE 8428
ENTRYPOINT ["/victoria-metrics-prod"]
ARG TARGETARCH
COPY victoria-metrics-${TARGETARCH}-prod ./victoria-metrics-prod
app/vmagent/multiarch/Dockerfile (new file, 12 lines)
@@ -0,0 +1,12 @@
# See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
ARG certs_image
ARG root_image
FROM $certs_image as certs
RUN apk --update --no-cache add ca-certificates

FROM $root_image
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
EXPOSE 8429
ENTRYPOINT ["/vmagent-prod"]
ARG TARGETARCH
COPY vmagent-${TARGETARCH}-prod ./vmagent-prod
@@ -56,12 +56,25 @@ users:

  # The user for inserting Prometheus data into VictoriaMetrics cluster under account 42
  # See https://victoriametrics.github.io/Cluster-VictoriaMetrics.html#url-format
  # All the reuqests to http://vmauth:8427 with the given Basic Auth (username:password)
  # All the requests to http://vmauth:8427 with the given Basic Auth (username:password)
  # will be routed to http://vminsert:8480/insert/42/prometheus .
  # For example, http://vmauth:8427/api/v1/write is routed to http://vminsert:8480/insert/42/prometheus/api/v1/write
  - username: "cluster-insert-account-42"
    password: "***"
    url_prefix: "http://vminsert:8480/insert/42/prometheus"


  # A single user for querying and inserting data:
  # - Requests to http://vmauth:8427/api/v1/query or http://vmauth:8427/api/v1/query_range
  #   are routed to http://vmselect:8481/select/42/prometheus.
  #   For example, http://vmauth:8427/api/v1/query is routed to http://vmselect:8480/select/42/prometheus/api/v1/query
  # - Requests to http://vmauth:8427/api/v1/write are routed to http://vminsert:8480/insert/42/prometheus/api/v1/write
  - username: "foobar"
    url_map:
    - src_paths: ["/api/v1/query", "/api/v1/query_range"]
      url_prefix: "http://vmselect:8481/select/42/prometheus"
    - src_paths: ["/api/v1/write"]
      url_prefix: "http://vminsert:8480/insert/42/prometheus"
```

The config may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.
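The example config above routes `/api/v1/query` and `/api/v1/query_range` to `vmselect` while `/api/v1/write` goes to `vminsert`, falling back to the per-user `url_prefix` when no `src_paths` entry matches. Below is a minimal, hedged sketch of that lookup logic using the same YAML shape; it mirrors the `UserInfo`/`URLMap` structs from the next hunk, but it is not the actual vmauth code path (the real routing lives in `createTargetURL` later in this diff), and the `route` helper name is invented for the example.

```go
// Illustrative sketch only: load a url_map config and resolve a request path,
// checking url_map entries first and falling back to url_prefix.
package main

import (
    "fmt"
    "log"

    "gopkg.in/yaml.v2"
)

type URLMap struct {
    SrcPaths  []string `yaml:"src_paths"`
    URLPrefix string   `yaml:"url_prefix"`
}

type UserInfo struct {
    Username  string   `yaml:"username"`
    Password  string   `yaml:"password"`
    URLPrefix string   `yaml:"url_prefix"`
    URLMap    []URLMap `yaml:"url_map"`
}

// route resolves a request path: url_map entries win over the per-user
// url_prefix, which acts as the default backend.
func route(ui UserInfo, path string) (string, error) {
    for _, e := range ui.URLMap {
        for _, src := range e.SrcPaths {
            if src == path {
                return e.URLPrefix + path, nil
            }
        }
    }
    if ui.URLPrefix != "" {
        return ui.URLPrefix + path, nil
    }
    return "", fmt.Errorf("missing route for %q", path)
}

func main() {
    cfg := []byte(`
username: "foobar"
url_map:
- src_paths: ["/api/v1/query", "/api/v1/query_range"]
  url_prefix: "http://vmselect:8481/select/42/prometheus"
- src_paths: ["/api/v1/write"]
  url_prefix: "http://vminsert:8480/insert/42/prometheus"
`)
    var ui UserInfo
    if err := yaml.Unmarshal(cfg, &ui); err != nil {
        log.Fatalf("cannot parse config: %s", err)
    }
    target, err := route(ui, "/api/v1/write")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(target) // http://vminsert:8480/insert/42/prometheus/api/v1/write
}
```

Running the sketch should print `http://vminsert:8480/insert/42/prometheus/api/v1/write`, matching the routing described in the config comments above.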
@@ -28,13 +28,20 @@ type AuthConfig struct {

// UserInfo is user information read from authConfigPath
type UserInfo struct {
    Username  string `yaml:"username"`
    Password  string `yaml:"password"`
    URLPrefix string `yaml:"url_prefix"`
    Username  string   `yaml:"username"`
    Password  string   `yaml:"password"`
    URLPrefix string   `yaml:"url_prefix"`
    URLMap    []URLMap `yaml:"url_map"`

    requests *metrics.Counter
}

// URLMap is a mapping from source paths to target urls.
type URLMap struct {
    SrcPaths  []string `yaml:"src_paths"`
    URLPrefix string   `yaml:"url_prefix"`
}

func initAuthConfig() {
    if len(*authConfigPath) == 0 {
        logger.Fatalf("missing required `-auth.config` command-line flag")
@ -109,23 +116,52 @@ func parseAuthConfig(data []byte) (map[string]*UserInfo, error) {
|
|||
if m[ui.Username] != nil {
|
||||
return nil, fmt.Errorf("duplicate username found; username: %q", ui.Username)
|
||||
}
|
||||
urlPrefix := ui.URLPrefix
|
||||
// Remove trailing '/' from urlPrefix
|
||||
for strings.HasSuffix(urlPrefix, "/") {
|
||||
urlPrefix = urlPrefix[:len(urlPrefix)-1]
|
||||
if len(ui.URLPrefix) > 0 {
|
||||
urlPrefix, err := sanitizeURLPrefix(ui.URLPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ui.URLPrefix = urlPrefix
|
||||
}
|
||||
// Validate urlPrefix
|
||||
target, err := url.Parse(urlPrefix)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid `url_prefix: %q`: %w", urlPrefix, err)
|
||||
for _, e := range ui.URLMap {
|
||||
if len(e.SrcPaths) == 0 {
|
||||
return nil, fmt.Errorf("missing `src_paths`")
|
||||
}
|
||||
for _, path := range e.SrcPaths {
|
||||
if !strings.HasPrefix(path, "/") {
|
||||
return nil, fmt.Errorf("`src_path`=%q must start with `/`", path)
|
||||
}
|
||||
}
|
||||
urlPrefix, err := sanitizeURLPrefix(e.URLPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
e.URLPrefix = urlPrefix
|
||||
}
|
||||
if target.Scheme != "http" && target.Scheme != "https" {
|
||||
return nil, fmt.Errorf("unsupported scheme for `url_prefix: %q`: %q; must be `http` or `https`", urlPrefix, target.Scheme)
|
||||
if len(ui.URLMap) == 0 && len(ui.URLPrefix) == 0 {
|
||||
return nil, fmt.Errorf("missing `url_prefix`")
|
||||
}
|
||||
|
||||
ui.URLPrefix = urlPrefix
|
||||
ui.requests = metrics.GetOrCreateCounter(fmt.Sprintf(`vmauth_user_requests_total{username=%q}`, ui.Username))
|
||||
m[ui.Username] = ui
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func sanitizeURLPrefix(urlPrefix string) (string, error) {
|
||||
// Remove trailing '/' from urlPrefix
|
||||
for strings.HasSuffix(urlPrefix, "/") {
|
||||
urlPrefix = urlPrefix[:len(urlPrefix)-1]
|
||||
}
|
||||
// Validate urlPrefix
|
||||
target, err := url.Parse(urlPrefix)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("invalid `url_prefix: %q`: %w", urlPrefix, err)
|
||||
}
|
||||
if target.Scheme != "http" && target.Scheme != "https" {
|
||||
return "", fmt.Errorf("unsupported scheme for `url_prefix: %q`: %q; must be `http` or `https`", urlPrefix, target.Scheme)
|
||||
}
|
||||
if target.Host == "" {
|
||||
return "", fmt.Errorf("missing hostname in `url_prefix %q`", urlPrefix)
|
||||
}
|
||||
return urlPrefix, nil
|
||||
}
|
||||
|
|
|
@ -46,6 +46,11 @@ users:
|
|||
- username: foo
|
||||
url_prefix: //bar
|
||||
`)
|
||||
f(`
|
||||
users:
|
||||
- username: foo
|
||||
url_prefix: http:///bar
|
||||
`)
|
||||
|
||||
// Duplicate users
|
||||
f(`
|
||||
|
@ -57,6 +62,31 @@ users:
|
|||
- username: foo
|
||||
url_prefix: https://sss.sss
|
||||
`)
|
||||
|
||||
// Missing url_prefix in url_map
|
||||
f(`
|
||||
users:
|
||||
- username: a
|
||||
url_map:
|
||||
- src_paths: ["/foo/bar"]
|
||||
`)
|
||||
|
||||
// Missing src_paths in url_map
|
||||
f(`
|
||||
users:
|
||||
- username: a
|
||||
url_map:
|
||||
- url_prefix: http://foobar
|
||||
`)
|
||||
|
||||
// src_path not starting with `/`
|
||||
f(`
|
||||
users:
|
||||
- username: a
|
||||
url_map:
|
||||
- src_paths: [foobar]
|
||||
url_prefix: http://foobar
|
||||
`)
|
||||
}
|
||||
|
||||
func TestParseAuthConfigSuccess(t *testing.T) {
|
||||
|
@ -103,6 +133,31 @@ users:
|
|||
URLPrefix: "https://bar/x",
|
||||
},
|
||||
})
|
||||
|
||||
// non-empty URLMap
|
||||
f(`
|
||||
users:
|
||||
- username: foo
|
||||
url_map:
|
||||
- src_paths: ["/api/v1/query","/api/v1/query_range"]
|
||||
url_prefix: http://vmselect/select/0/prometheus
|
||||
- src_paths: ["/api/v1/write"]
|
||||
url_prefix: http://vminsert/insert/0/prometheus
|
||||
`, map[string]*UserInfo{
|
||||
"foo": {
|
||||
Username: "foo",
|
||||
URLMap: []URLMap{
|
||||
{
|
||||
SrcPaths: []string{"/api/v1/query", "/api/v1/query_range"},
|
||||
URLPrefix: "http://vmselect/select/0/prometheus",
|
||||
},
|
||||
{
|
||||
SrcPaths: []string{"/api/v1/write"},
|
||||
URLPrefix: "http://vminsert/insert/0/prometheus",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func removeMetrics(m map[string]*UserInfo) {
|
||||
|
|
|
@@ -54,14 +54,17 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
        return true
    }
    ac := authConfig.Load().(map[string]*UserInfo)
    info := ac[username]
    if info == nil || info.Password != password {
    ui := ac[username]
    if ui == nil || ui.Password != password {
        httpserver.Errorf(w, r, "cannot find the provided username %q or password in config", username)
        return true
    }
    info.requests.Inc()

    targetURL := createTargetURL(info.URLPrefix, r.URL)
    ui.requests.Inc()
    targetURL, err := createTargetURL(ui, r.URL)
    if err != nil {
        httpserver.Errorf(w, r, "cannot determine targetURL: %s", err)
        return true
    }
    if _, err := url.Parse(targetURL); err != nil {
        httpserver.Errorf(w, r, "invalid targetURL=%q: %s", targetURL, err)
        return true
app/vmauth/multiarch/Dockerfile (new file, 12 lines)
@@ -0,0 +1,12 @@
# See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
ARG certs_image
ARG root_image
FROM $certs_image as certs
RUN apk --update --no-cache add ca-certificates

FROM $root_image
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
EXPOSE 8427
ENTRYPOINT ["/vmauth-prod"]
ARG TARGETARCH
COPY vmauth-${TARGETARCH}-prod ./vmauth-prod
@@ -1,16 +1,31 @@
package main

import (
    "fmt"
    "net/url"
    "path"
    "strings"
)

func createTargetURL(prefix string, u *url.URL) string {
func createTargetURL(ui *UserInfo, uOrig *url.URL) (string, error) {
    u, err := url.Parse(uOrig.String())
    if err != nil {
        return "", fmt.Errorf("cannot make a copy of %q: %w", u, err)
    }
    // Prevent from attacks with using `..` in r.URL.Path
    u.Path = path.Clean(u.Path)
    if !strings.HasPrefix(u.Path, "/") {
        u.Path = "/" + u.Path
    }
    return prefix + u.RequestURI()
    for _, e := range ui.URLMap {
        for _, path := range e.SrcPaths {
            if u.Path == path {
                return e.URLPrefix + u.RequestURI(), nil
            }
        }
    }
    if len(ui.URLPrefix) > 0 {
        return ui.URLPrefix + u.RequestURI(), nil
    }
    return "", fmt.Errorf("missing route for %q", u)
}
|
@ -5,22 +5,82 @@ import (
|
|||
"testing"
|
||||
)
|
||||
|
||||
func TestCreateTargetURL(t *testing.T) {
|
||||
f := func(prefix, requestURI, expectedTarget string) {
|
||||
func TestCreateTargetURLSuccess(t *testing.T) {
|
||||
f := func(ui *UserInfo, requestURI, expectedTarget string) {
|
||||
t.Helper()
|
||||
u, err := url.Parse(requestURI)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot parse %q: %s", requestURI, err)
|
||||
}
|
||||
target := createTargetURL(prefix, u)
|
||||
target, err := createTargetURL(ui, u)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if target != expectedTarget {
|
||||
t.Fatalf("unexpected target; got %q; want %q", target, expectedTarget)
|
||||
}
|
||||
}
|
||||
f("http://foo.bar", "", "http://foo.bar/.")
|
||||
f("http://foo.bar", "/", "http://foo.bar/")
|
||||
f("http://foo.bar", "a/b?c=d", "http://foo.bar/a/b?c=d")
|
||||
f("https://sss:3894/x/y", "/z", "https://sss:3894/x/y/z")
|
||||
f("https://sss:3894/x/y", "/../../aaa", "https://sss:3894/x/y/aaa")
|
||||
f("https://sss:3894/x/y", "/./asd/../../aaa?a=d&s=s/../d", "https://sss:3894/x/y/aaa?a=d&s=s/../d")
|
||||
// Simple routing with `url_prefix`
|
||||
f(&UserInfo{
|
||||
URLPrefix: "http://foo.bar",
|
||||
}, "", "http://foo.bar/.")
|
||||
f(&UserInfo{
|
||||
URLPrefix: "http://foo.bar",
|
||||
}, "/", "http://foo.bar/")
|
||||
f(&UserInfo{
|
||||
URLPrefix: "http://foo.bar",
|
||||
}, "a/b?c=d", "http://foo.bar/a/b?c=d")
|
||||
f(&UserInfo{
|
||||
URLPrefix: "https://sss:3894/x/y",
|
||||
}, "/z", "https://sss:3894/x/y/z")
|
||||
f(&UserInfo{
|
||||
URLPrefix: "https://sss:3894/x/y",
|
||||
}, "/../../aaa", "https://sss:3894/x/y/aaa")
|
||||
f(&UserInfo{
|
||||
URLPrefix: "https://sss:3894/x/y",
|
||||
}, "/./asd/../../aaa?a=d&s=s/../d", "https://sss:3894/x/y/aaa?a=d&s=s/../d")
|
||||
|
||||
// Complex routing with `url_map`
|
||||
ui := &UserInfo{
|
||||
URLMap: []URLMap{
|
||||
{
|
||||
SrcPaths: []string{"/api/v1/query"},
|
||||
URLPrefix: "http://vmselect/0/prometheus",
|
||||
},
|
||||
{
|
||||
SrcPaths: []string{"/api/v1/write"},
|
||||
URLPrefix: "http://vminsert/0/prometheus",
|
||||
},
|
||||
},
|
||||
URLPrefix: "http://default-server",
|
||||
}
|
||||
f(ui, "/api/v1/query?query=up", "http://vmselect/0/prometheus/api/v1/query?query=up")
|
||||
f(ui, "/api/v1/write", "http://vminsert/0/prometheus/api/v1/write")
|
||||
f(ui, "/api/v1/query_range", "http://default-server/api/v1/query_range")
|
||||
}
|
||||
|
||||
func TestCreateTargetURLFailure(t *testing.T) {
|
||||
f := func(ui *UserInfo, requestURI string) {
|
||||
t.Helper()
|
||||
u, err := url.Parse(requestURI)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot parse %q: %s", requestURI, err)
|
||||
}
|
||||
target, err := createTargetURL(ui, u)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
if target != "" {
|
||||
t.Fatalf("unexpected target=%q; want empty string", target)
|
||||
}
|
||||
}
|
||||
f(&UserInfo{}, "/foo/bar")
|
||||
f(&UserInfo{
|
||||
URLMap: []URLMap{
|
||||
{
|
||||
SrcPaths: []string{"/api/v1/query"},
|
||||
URLPrefix: "http://foobar/baz",
|
||||
},
|
||||
},
|
||||
}, "/api/v1/write")
|
||||
}
|
||||
|
|
app/vmbackup/multiarch/Dockerfile (new file, 11 lines)
@@ -0,0 +1,11 @@
# See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
ARG certs_image
ARG root_image
FROM $certs_image as certs
RUN apk --update --no-cache add ca-certificates

FROM $root_image
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
ENTRYPOINT ["/vmbackup-prod"]
ARG TARGETARCH
COPY vmbackup-${TARGETARCH}-prod ./vmbackup-prod
@@ -1,4 +1,6 @@
# vmctl - Victoria metrics command-line tool
# vmctl

Victoria metrics command-line tool

Features:
- [x] Prometheus: migrate data from Prometheus to VictoriaMetrics using snapshot API
app/vmctl/multiarch/Dockerfile (new file, 11 lines)
@@ -0,0 +1,11 @@
# See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
ARG certs_image
ARG root_image
FROM $certs_image as certs
RUN apk --update --no-cache add ca-certificates

FROM $root_image
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
ENTRYPOINT ["/vmctl-prod"]
ARG TARGETARCH
COPY vmctl-${TARGETARCH}-prod ./vmctl-prod
@ -92,7 +92,7 @@ func Stop() {
|
|||
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
path := strings.Replace(r.URL.Path, "//", "/", -1)
|
||||
switch path {
|
||||
case "/api/v1/write":
|
||||
case "/prometheus/api/v1/write", "/api/v1/write":
|
||||
prometheusWriteRequests.Inc()
|
||||
if err := promremotewrite.InsertHandler(r); err != nil {
|
||||
prometheusWriteErrors.Inc()
|
||||
|
@ -101,7 +101,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
|||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return true
|
||||
case "/api/v1/import":
|
||||
case "/prometheus/api/v1/import", "/api/v1/import":
|
||||
vmimportRequests.Inc()
|
||||
if err := vmimport.InsertHandler(r); err != nil {
|
||||
vmimportErrors.Inc()
|
||||
|
@ -110,7 +110,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
|||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return true
|
||||
case "/api/v1/import/csv":
|
||||
case "/prometheus/api/v1/import/csv", "/api/v1/import/csv":
|
||||
csvimportRequests.Inc()
|
||||
if err := csvimport.InsertHandler(r); err != nil {
|
||||
csvimportErrors.Inc()
|
||||
|
@ -119,7 +119,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
|||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return true
|
||||
case "/api/v1/import/prometheus":
|
||||
case "/prometheus/api/v1/import/prometheus", "/api/v1/import/prometheus":
|
||||
prometheusimportRequests.Inc()
|
||||
if err := prometheusimport.InsertHandler(r); err != nil {
|
||||
prometheusimportErrors.Inc()
|
||||
|
@ -128,7 +128,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
|||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return true
|
||||
case "/api/v1/import/native":
|
||||
case "/prometheus/api/v1/import/native", "/api/v1/import/native":
|
||||
nativeimportRequests.Inc()
|
||||
if err := native.InsertHandler(r); err != nil {
|
||||
nativeimportErrors.Inc()
|
||||
|
@ -137,7 +137,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
|||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return true
|
||||
case "/write", "/api/v2/write":
|
||||
case "/influx/write", "/influx/api/v2/write", "/write", "/api/v2/write":
|
||||
influxWriteRequests.Inc()
|
||||
if err := influx.InsertHandlerForHTTP(r); err != nil {
|
||||
influxWriteErrors.Inc()
|
||||
|
@ -146,23 +146,23 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
|||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return true
|
||||
case "/query":
|
||||
case "/influx/query", "/query":
|
||||
// Emulate fake response for influx query.
|
||||
// This is required for TSBS benchmark.
|
||||
influxQueryRequests.Inc()
|
||||
fmt.Fprintf(w, `{"results":[{"series":[{"values":[]}]}]}`)
|
||||
return true
|
||||
case "/targets":
|
||||
case "/prometheus/targets", "/targets":
|
||||
promscrapeTargetsRequests.Inc()
|
||||
promscrape.WriteHumanReadableTargetsStatus(w, r)
|
||||
return true
|
||||
case "/api/v1/targets":
|
||||
case "/prometheus/api/v1/targets", "/api/v1/targets":
|
||||
promscrapeAPIV1TargetsRequests.Inc()
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
state := r.FormValue("state")
|
||||
promscrape.WriteAPIV1Targets(w, state)
|
||||
return true
|
||||
case "/-/reload":
|
||||
case "/prometheus/-/reload", "/-/reload":
|
||||
promscrapeConfigReloadRequests.Inc()
|
||||
procutil.SelfSIGHUP()
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
|
|
|
@@ -23,7 +23,7 @@ var (
func InsertHandler(req *http.Request) error {
    path := req.URL.Path
    switch path {
    case "/api/put":
    case "/opentsdb/api/put", "/api/put":
        extraLabels, err := parserCommon.GetExtraLabels(req)
        if err != nil {
            return err
app/vmrestore/multiarch/Dockerfile (new file, 11 lines)
@@ -0,0 +1,11 @@
# See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
ARG certs_image
ARG root_image
FROM $certs_image as certs
RUN apk --update --no-cache add ca-certificates

FROM $root_image
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
ENTRYPOINT ["/vmrestore-prod"]
ARG TARGETARCH
COPY vmrestore-${TARGETARCH}-prod ./vmrestore-prod
@@ -118,8 +118,18 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
        return true
    }

    // Strip /prometheus and /graphite prefixes in order to provide path compatibility with cluster version
    //
    // See https://victoriametrics.github.io/Cluster-VictoriaMetrics.html#url-format
    switch {
    case strings.HasPrefix(path, "/prometheus"):
        path = path[len("/prometheus"):]
    case strings.HasPrefix(path, "/graphite"):
        path = path[len("/graphite"):]
    }

    if strings.HasPrefix(path, "/api/v1/label/") {
        s := r.URL.Path[len("/api/v1/label/"):]
        s := path[len("/api/v1/label/"):]
        if strings.HasSuffix(s, "/values") {
            labelValuesRequests.Inc()
            labelName := s[:len(s)-len("/values")]
@@ -133,7 +143,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
        }
    }
    if strings.HasPrefix(path, "/tags/") && !isGraphiteTagsPath(path) {
        tagName := r.URL.Path[len("/tags/"):]
        tagName := path[len("/tags/"):]
        graphiteTagValuesRequests.Inc()
        if err := graphite.TagValuesHandler(startTime, tagName, w, r); err != nil {
            graphiteTagValuesErrors.Inc()
|
@ -260,9 +260,9 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
|||
if err := r.ParseForm(); err != nil {
|
||||
return fmt.Errorf("cannot parse request form values: %w", err)
|
||||
}
|
||||
matches, err := getMatchesFromRequest(r)
|
||||
if err != nil {
|
||||
return err
|
||||
matches := getMatchesFromRequest(r)
|
||||
if len(matches) == 0 {
|
||||
return fmt.Errorf("missing `match[]` query arg")
|
||||
}
|
||||
start, err := searchutils.GetTime(r, "start", 0)
|
||||
if err != nil {
|
||||
|
@ -476,8 +476,9 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
matches := getMatchesFromRequest(r)
|
||||
var labelValues []string
|
||||
if len(r.Form["match[]"]) == 0 && len(etf) == 0 {
|
||||
if len(matches) == 0 && len(etf) == 0 {
|
||||
if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
|
||||
var err error
|
||||
labelValues, err = netstorage.GetLabelValues(labelName, deadline)
|
||||
|
@ -508,7 +509,6 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
|
|||
// i.e. /api/v1/label/foo/values?match[]=foobar{baz="abc"}&start=...&end=...
|
||||
// is equivalent to `label_values(foobar{baz="abc"}, foo)` call on the selected
|
||||
// time range in Grafana templating.
|
||||
matches := r.Form["match[]"]
|
||||
if len(matches) == 0 {
|
||||
matches = []string{fmt.Sprintf("{%s!=''}", labelName)}
|
||||
}
|
||||
|
@ -691,8 +691,9 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
matches := getMatchesFromRequest(r)
|
||||
var labels []string
|
||||
if len(r.Form["match[]"]) == 0 && len(etf) == 0 {
|
||||
if len(matches) == 0 && len(etf) == 0 {
|
||||
if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
|
||||
var err error
|
||||
labels, err = netstorage.GetLabels(deadline)
|
||||
|
@ -721,7 +722,6 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
|||
} else {
|
||||
// Extended functionality that allows filtering by label filters and time range
|
||||
// i.e. /api/v1/labels?match[]=foobar{baz="abc"}&start=...&end=...
|
||||
matches := r.Form["match[]"]
|
||||
if len(matches) == 0 {
|
||||
matches = []string{"{__name__!=''}"}
|
||||
}
|
||||
|
@ -1274,9 +1274,9 @@ func getTagFilterssFromMatches(matches []string) ([][]storage.TagFilter, error)
|
|||
}
|
||||
|
||||
func getTagFilterssFromRequest(r *http.Request) ([][]storage.TagFilter, error) {
|
||||
matches, err := getMatchesFromRequest(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
matches := getMatchesFromRequest(r)
|
||||
if len(matches) == 0 {
|
||||
return nil, fmt.Errorf("missing `match[]` query arg")
|
||||
}
|
||||
tagFilterss, err := getTagFilterssFromMatches(matches)
|
||||
if err != nil {
|
||||
|
@ -1290,16 +1290,11 @@ func getTagFilterssFromRequest(r *http.Request) ([][]storage.TagFilter, error) {
|
|||
return tagFilterss, nil
|
||||
}
|
||||
|
||||
func getMatchesFromRequest(r *http.Request) ([]string, error) {
|
||||
func getMatchesFromRequest(r *http.Request) []string {
|
||||
matches := r.Form["match[]"]
|
||||
if len(matches) > 0 {
|
||||
return matches, nil
|
||||
}
|
||||
match := r.Form.Get("match")
|
||||
if len(match) == 0 {
|
||||
return nil, fmt.Errorf("missing `match[]` query arg")
|
||||
}
|
||||
return []string{match}, nil
|
||||
// This is needed for backwards compatibility
|
||||
matches = append(matches, r.Form["match"]...)
|
||||
return matches
|
||||
}
|
||||
|
||||
func getLatencyOffsetMilliseconds() int64 {
|
||||
|
|
|
@@ -268,14 +268,47 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
        return rv, nil
    }
    if be, ok := e.(*metricsql.BinaryOpExpr); ok {
        left, err := evalExpr(ec, be.Left)
        if err != nil {
            return nil, err
        }
        right, err := evalExpr(ec, be.Right)
        if err != nil {
            return nil, err
        // Execute left and right sides of the binary operation in parallel.
        // This should reduce execution times for heavy queries.
        // On the other side this can increase CPU and RAM usage when executing heavy queries.
        // TODO: think on how to limit CPU and RAM usage while leaving short execution times.
        var left, right []*timeseries
        var mu sync.Mutex
        var wg sync.WaitGroup
        var errGlobal error
        wg.Add(1)
        go func() {
            defer wg.Done()
            ecCopy := newEvalConfig(ec)
            tss, err := evalExpr(ecCopy, be.Left)
            mu.Lock()
            if err != nil {
                if errGlobal == nil {
                    errGlobal = err
                }
            }
            left = tss
            mu.Unlock()
        }()
        wg.Add(1)
        go func() {
            defer wg.Done()
            ecCopy := newEvalConfig(ec)
            tss, err := evalExpr(ecCopy, be.Right)
            mu.Lock()
            if err != nil {
                if errGlobal == nil {
                    errGlobal = err
                }
            }
            right = tss
            mu.Unlock()
        }()
        wg.Wait()
        if errGlobal != nil {
            return nil, errGlobal
        }

        bf := getBinaryOpFunc(be.Op)
        if bf == nil {
            return nil, fmt.Errorf(`unknown binary op %q`, be.Op)
|
@ -449,6 +449,9 @@ func registerStorageMetrics() {
|
|||
metrics.NewGauge(`vm_index_blocks_with_metric_ids_incorrect_order_total`, func() float64 {
|
||||
return float64(idbm().IndexBlocksWithMetricIDsIncorrectOrder)
|
||||
})
|
||||
metrics.NewGauge(`vm_composite_index_min_timestamp`, func() float64 {
|
||||
return float64(idbm().MinTimestampForCompositeIndex) / 1e3
|
||||
})
|
||||
|
||||
metrics.NewGauge(`vm_assisted_merges_total{type="storage/small"}`, func() float64 {
|
||||
return float64(tm().SmallAssistedMerges)
|
||||
|
@ -646,6 +649,18 @@ func registerStorageMetrics() {
|
|||
metrics.NewGauge(`vm_cache_size_bytes{type="storage/metricName"}`, func() float64 {
|
||||
return float64(m().MetricNameCacheSizeBytes)
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_size_bytes{type="storage/bigIndexBlocks"}`, func() float64 {
|
||||
return float64(tm().BigIndexBlocksCacheSizeBytes)
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_size_bytes{type="storage/smallIndexBlocks"}`, func() float64 {
|
||||
return float64(tm().SmallIndexBlocksCacheSizeBytes)
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_size_bytes{type="indexdb/dataBlocks"}`, func() float64 {
|
||||
return float64(idbm().DataBlocksCacheSizeBytes)
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_size_bytes{type="indexdb/indexBlocks"}`, func() float64 {
|
||||
return float64(idbm().IndexBlocksCacheSizeBytes)
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_size_bytes{type="storage/date_metricID"}`, func() float64 {
|
||||
return float64(m().DateMetricIDCacheSizeBytes)
|
||||
})
|
||||
|
|
|
@ -4,9 +4,9 @@ DOCKER_NAMESPACE := victoriametrics
|
|||
|
||||
ROOT_IMAGE ?= alpine:3.13.1
|
||||
CERTS_IMAGE := alpine:3.13.1
|
||||
GO_BUILDER_IMAGE := golang:1.15.7
|
||||
GO_BUILDER_IMAGE := golang:1.15.8
|
||||
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr : _)
|
||||
BASE_IMAGE := local/base:1.1.1-$(shell echo $(ROOT_IMAGE) | tr : _)-$(shell echo $(CERTS_IMAGE) | tr : _)
|
||||
BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr : _)-$(shell echo $(CERTS_IMAGE) | tr : _)
|
||||
|
||||
package-base:
|
||||
(docker image ls --format '{{.Repository}}:{{.Tag}}' | grep -q '$(BASE_IMAGE)$$') \
|
||||
|
@ -21,7 +21,7 @@ package-builder:
|
|||
--build-arg go_builder_image=$(GO_BUILDER_IMAGE) \
|
||||
deployment/docker/builder
|
||||
|
||||
app-via-docker: package-base package-builder
|
||||
app-via-docker: package-builder
|
||||
mkdir -p gocache-for-docker
|
||||
docker run --rm \
|
||||
--user $(shell id -u):$(shell id -g) \
|
||||
|
@ -37,7 +37,7 @@ app-via-docker: package-base package-builder
|
|||
-tags 'netgo osusergo nethttpomithttp2' \
|
||||
-o bin/$(APP_NAME)$(APP_SUFFIX)-prod $(PKG_PREFIX)/app/$(APP_NAME)
|
||||
|
||||
package-via-docker:
|
||||
package-via-docker: package-base
|
||||
(docker image ls --format '{{.Repository}}:{{.Tag}}' | grep -q '$(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(APP_SUFFIX)$(RACE)$$') || (\
|
||||
$(MAKE) app-via-docker && \
|
||||
docker build \
|
||||
|
@ -46,45 +46,22 @@ package-via-docker:
|
|||
-t $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(APP_SUFFIX)$(RACE) \
|
||||
-f app/$(APP_NAME)/deployment/Dockerfile bin)
|
||||
|
||||
package-manifest: \
|
||||
package-via-docker-amd64 \
|
||||
package-via-docker-arm \
|
||||
package-via-docker-arm64 \
|
||||
package-via-docker-ppc64le \
|
||||
package-via-docker-386
|
||||
$(MAKE) package-manifest-internal
|
||||
|
||||
package-manifest-internal:
|
||||
docker push $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)-amd64$(RACE)
|
||||
docker push $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)-arm$(RACE)
|
||||
docker push $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)-arm64$(RACE)
|
||||
docker push $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)-ppc64le$(RACE)
|
||||
docker push $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)-386$(RACE)
|
||||
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create --amend $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(RACE) \
|
||||
$(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)-amd64$(RACE) \
|
||||
$(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)-arm$(RACE) \
|
||||
$(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)-arm64$(RACE) \
|
||||
$(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)-ppc64le$(RACE) \
|
||||
$(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)-386$(RACE)
|
||||
GOARCH=amd64 $(MAKE) package-manifest-annotate-goarch
|
||||
GOARCH=arm $(MAKE) package-manifest-annotate-goarch
|
||||
GOARCH=arm64 $(MAKE) package-manifest-annotate-goarch
|
||||
GOARCH=ppc64le $(MAKE) package-manifest-annotate-goarch
|
||||
GOARCH=386 $(MAKE) package-manifest-annotate-goarch
|
||||
|
||||
package-manifest-annotate-goarch:
|
||||
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest annotate $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(RACE) \
|
||||
$(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)-$(GOARCH)$(RACE) --os linux --arch $(GOARCH)
|
||||
|
||||
publish-via-docker: package-manifest
|
||||
docker tag $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)-amd64$(RACE) $(DOCKER_NAMESPACE)/$(APP_NAME):latest-amd64$(RACE)
|
||||
docker tag $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)-arm$(RACE) $(DOCKER_NAMESPACE)/$(APP_NAME):latest-arm$(RACE)
|
||||
docker tag $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)-arm64$(RACE) $(DOCKER_NAMESPACE)/$(APP_NAME):latest-arm64$(RACE)
|
||||
docker tag $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)-ppc64le$(RACE) $(DOCKER_NAMESPACE)/$(APP_NAME):latest-ppc64le$(RACE)
|
||||
docker tag $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)-386$(RACE) $(DOCKER_NAMESPACE)/$(APP_NAME):latest-386$(RACE)
|
||||
PKG_TAG=latest $(MAKE) package-manifest-internal
|
||||
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push --purge $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(RACE)
|
||||
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push --purge $(DOCKER_NAMESPACE)/$(APP_NAME):latest$(RACE)
|
||||
publish-via-docker: \
|
||||
app-via-docker-amd64 \
|
||||
app-via-docker-arm \
|
||||
app-via-docker-arm64 \
|
||||
app-via-docker-ppc64le \
|
||||
app-via-docker-386
|
||||
docker buildx build \
|
||||
--platform=linux/amd64,linux/arm,linux/arm64,linux/ppc64le,linux/386 \
|
||||
--build-arg certs_image=$(CERTS_IMAGE) \
|
||||
--build-arg root_image=$(ROOT_IMAGE) \
|
||||
--build-arg APP_NAME=$(APP_NAME) \
|
||||
-t $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(RACE) \
|
||||
-o type=image \
|
||||
-f app/$(APP_NAME)/multiarch/Dockerfile \
|
||||
--push \
|
||||
bin
|
||||
|
||||
run-via-docker: package-via-docker
|
||||
docker run -it --rm \
|
||||
|
|
|
@@ -2,6 +2,19 @@

# tip

* FEATURE: optimize searching for matching metrics for `metric{<label_filters>}` queries if `<label_filters>` contains at least a single filter. For example, the query `up{job="foobar"}` should find the matching time series much faster than previously.
* FEATURE: reduce execution times for `q1 <binary_op> q2` queries by executing `q1` and `q2` in parallel.
* FEATURE: single-node VictoriaMetrics now accepts requests to handlers with `/prometheus` and `/graphite` prefixes such as `/prometheus/api/v1/query`. This improves compatibility with [handlers from VictoriaMetrics cluster](https://victoriametrics.github.io/Cluster-VictoriaMetrics.html#url-format).
* FEATURE: expose `process_open_fds` and `process_max_fds` metrics. These metrics can be used for alerting when `process_open_fds` reaches `process_max_fds`. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/402 and https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1037
* FEATURE: vmalert: add `-datasource.appendTypePrefix` command-line option for querying both Prometheus and Graphite datasource in cluster version of VictoriaMetrics. See [these docs](https://victoriametrics.github.io/vmalert.html#graphite) for details.
* FEATURE: vmauth: add ability to route requests from a single user to multiple destinations depending on the requested paths. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1064
* FEATURE: remove dependency on external programs such as `cat`, `grep` and `cut` when detecting cpu and memory limits inside Docker or LXC container.

* BUGFIX: properly convert regexp tag filters containing escaped dots to non-regexp tag filters. For example, `{foo=~"bar\.baz"}` should be converted to `{foo="bar.baz"}`. Previously it was incorrectly converted to `{foo="bar\.baz"}`, which could result in missing time series for this tag filter.
* BUGFIX: do not spam error logs when discovering Docker Swarm targets without dedicated IP. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1028 .
* BUGFIX: properly embed timezone data into VictoriaMetrics apps. This should fix `-loggerTimezone` usage inside Docker containers.
* BUGFIX: properly build Docker images for non-amd64 architectures (arm, arm64, ppc64le, 386) on [Docker hub](https://hub.docker.com/u/victoriametrics/). Previously these images were incorrectly based on amd64 base image, so they didn't work.


# [v1.53.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.53.1)
@@ -179,7 +179,7 @@ or [an alternative dashboard for VictoriaMetrics cluster](https://grafana.com/gr
  - `prometheus/api/v1/import` - for importing data obtained via `api/v1/export` on `vmselect` (see below).
  - `prometheus/api/v1/import/native` - for importing data obtained via `api/v1/export/native` on `vmselect` (see below).
  - `prometheus/api/v1/import/csv` - for importing arbitrary CSV data. See [these docs](https://victoriametrics.github.io/Single-server-VictoriaMetrics.html#how-to-import-csv-data) for details.
  - `prometheus/api/v1/import/prometheus` - for importing data in Prometheus exposition format. See [these docs](https://victoriametrics.github.io/Single-server-VictoriaMetrics.html#how-to-import-data-in-prometheus-exposition-format) for details.
  - `prometheus/api/v1/import/prometheus` - for importing data in [Prometheus text exposition format](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format) and in [OpenMetrics format](https://github.com/OpenObservability/OpenMetrics/blob/master/specification/OpenMetrics.md). See [these docs](https://victoriametrics.github.io/Single-server-VictoriaMetrics.html#how-to-import-data-in-prometheus-exposition-format) for details.

* URLs for [Prometheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/): `http://<vmselect>:8481/select/<accountID>/prometheus/<suffix>`, where:
  - `<accountID>` is an arbitrary number identifying data namespace for the query (aka tenant)
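The URL scheme above means every cluster query goes through `vmselect` at `/select/<accountID>/prometheus/<suffix>`. A short hedged Go sketch of issuing `/api/v1/query` for one tenant follows; the `vmselect:8481` host name and the tenant number 42 are illustrative assumptions, not values taken from this diff.

```go
// Sketch: query a VictoriaMetrics cluster through vmselect using the
// /select/<accountID>/prometheus/<suffix> URL format described above.
package main

import (
    "fmt"
    "io/ioutil"
    "log"
    "net/http"
    "net/url"
)

func main() {
    accountID := 42
    base := fmt.Sprintf("http://vmselect:8481/select/%d/prometheus", accountID)

    // /api/v1/query is one of the Prometheus querying API suffixes.
    params := url.Values{}
    params.Set("query", "up")

    resp, err := http.Get(base + "/api/v1/query?" + params.Encode())
    if err != nil {
        log.Fatalf("query failed: %s", err)
    }
    defer resp.Body.Close()

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        log.Fatalf("cannot read response: %s", err)
    }
    fmt.Printf("%s\n", body)
}
```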
@ -338,9 +338,15 @@ It is available in the [helm-charts](https://github.com/VictoriaMetrics/helm-cha
|
|||
|
||||
## Replication and data safety
|
||||
|
||||
In order to enable application-level replication, `-replicationFactor=N` command-line flag must be passed to `vminsert`.
|
||||
By default VictoriaMetrics offloads replication to the underlying storage pointed by `-storageDataPath`.
|
||||
|
||||
The replication can be enabled by passing `-replicationFactor=N` command-line flag to `vminsert`.
|
||||
This guarantees that all the data remains available for querying if up to `N-1` `vmstorage` nodes are unavailable.
|
||||
For example, when `-replicationFactor=3` is passed to `vminsert`, then it replicates all the ingested data to 3 distinct `vmstorage` nodes.
|
||||
The cluster must contain at least `2*N-1` `vmstorage` nodes, where `N`
|
||||
is replication factor, in order to maintain the given replication factor for newly ingested data when `N-1` of storage nodes are lost.
|
||||
For example, when `-replicationFactor=3` is passed to `vminsert`, then it replicates all the ingested data to 3 distinct `vmstorage` nodes,
|
||||
so up to 2 `vmstorage` nodes can be lost without data loss. The minimum number of `vmstorage` nodes should be equal to `2*3-1 = 5`, so when 2 `vmstorage` nodes are lost,
|
||||
the remaining 3 `vmstorage` nodes could provide the `-replicationFactor=3` for newly ingested data.
|
||||
|
||||
When the replication is enabled, `-replicationFactor=N` and `-dedup.minScrapeInterval=1ms` command-line flag must be passed to `vmselect` nodes.
|
||||
The `-replicationFactor=N` improves query performance when a part of vmstorage nodes respond slowly and/or temporarily unavailable.
|
||||
|
@ -350,9 +356,9 @@ when [deduplication](https://victoriametrics.github.io/Single-server-VictoriaMet
|
|||
Note that [replication doesn't save from disaster](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883),
|
||||
so it is recommended performing regular backups. See [these docs](#backups) for details.
|
||||
|
||||
By default VictoriaMetrics offloads replication to the underlying storage pointed by `-storageDataPath`.
|
||||
It is recommended storing data on [Google Compute Engine persistent disks](https://cloud.google.com/compute/docs/disks/#pdspecs),
|
||||
since they are protected from data loss and data corruption. They also provide consistently high performance
|
||||
Note that the replication increases resource usage - CPU, RAM, disk space, network bandwidth - by up to `-replicationFactor` times. So it may be worth
|
||||
offloading the replication to underlying storage pointed by `-storageDataPath` such as [Google Compute Engine persistent disk](https://cloud.google.com/compute/docs/disks/#pdspecs),
|
||||
which is protected from data loss and data corruption. It also provide consistently high performance
|
||||
and [may be resized](https://cloud.google.com/compute/docs/disks/add-persistent-disk) without downtime.
|
||||
HDD-based persistent disks should be enough for the majority of use cases.
|
||||
|
||||
|
|
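The paragraph above derives the sizing rule `2*N-1` for `-replicationFactor=N`: losing `N-1` nodes must still leave `N` nodes to hold the configured replication factor for newly ingested data. The tiny Go snippet below simply restates that arithmetic for a few factors; it is an illustration, not code from this repository.

```go
// Restates the 2*N-1 sizing rule from the replication docs above.
package main

import "fmt"

// minVMStorageNodes returns the minimum vmstorage node count required to keep
// replication factor N after N-1 node losses, i.e. 2*N-1.
func minVMStorageNodes(replicationFactor int) int {
    return 2*replicationFactor - 1
}

func main() {
    for _, rf := range []int{1, 2, 3, 5} {
        fmt.Printf("-replicationFactor=%d => at least %d vmstorage nodes (tolerates %d lost)\n",
            rf, minVMStorageNodes(rf), rf-1)
    }
}
```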
|
@ -533,6 +533,7 @@ The `/api/v1/export` endpoint should return the following response:
|
|||
Extra labels may be added to all the imported time series by passing `extra_label=name=value` query args.
|
||||
For example, `/api/put?extra_label=foo=bar` would add `{foo="bar"}` label to all the ingested metrics.
|
||||
|
||||
|
||||
## Prometheus querying API usage
|
||||
|
||||
VictoriaMetrics supports the following handlers from [Prometheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/):
|
||||
|
@ -548,6 +549,8 @@ VictoriaMetrics supports the following handlers from [Prometheus querying API](h
|
|||
* [/api/v1/targets](https://prometheus.io/docs/prometheus/latest/querying/api/#targets) - see [these docs](#how-to-scrape-prometheus-exporters-such-as-node-exporter) for more details.
|
||||
|
||||
These handlers can be queried from Prometheus-compatible clients such as Grafana or curl.
|
||||
All the Prometheus querying API handlers can be prepended with `/prometheus` prefix. For example, both `/prometheus/api/v1/query` and `/api/v1/query` should work.
|
||||
|
||||
|
||||
### Prometheus querying API enhancements
|
||||
|
||||
|
@ -592,6 +595,8 @@ VictoriaMetrics supports the following Graphite APIs, which are needed for [Grap
|
|||
* Metrics API - see [these docs](#graphite-metrics-api-usage).
|
||||
* Tags API - see [these docs](#graphite-tags-api-usage).
|
||||
|
||||
All the Graphite handlers can be pre-pended with `/graphite` prefix. For example, both `/graphite/metrics/find` and `/metrics/find` should work.
|
||||
|
||||
VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://victoriametrics.github.io/MetricsQL.html).
|
||||
For example, `{__graphite__="foo.*.bar"}` is equivalent to `{__name__=~"foo[.][^.]*[.]bar"}`, but it works faster
|
||||
and it is easier to use when migrating from Graphite to VictoriaMetrics.
|
||||
|
@ -599,8 +604,8 @@ and it is easier to use when migrating from Graphite to VictoriaMetrics.
|
|||
|
||||
### Graphite Render API usage
|
||||
|
||||
[VictoriaMetrics Enterprise](https://victoriametrics.com/enterprise.html) supports [Graphite Render API](https://graphite.readthedocs.io/en/stable/render_api.html) subset,
|
||||
which is needed for [Graphite datasource in Grafana](https://grafana.com/docs/grafana/latest/datasources/graphite/).
|
||||
[VictoriaMetrics Enterprise](https://victoriametrics.com/enterprise.html) supports [Graphite Render API](https://graphite.readthedocs.io/en/stable/render_api.html) subset
|
||||
at `/render` endpoint. This subset is required for [Graphite datasource in Grafana](https://grafana.com/docs/grafana/latest/datasources/graphite/).
|
||||
|
||||
|
||||
### Graphite Metrics API usage
|
||||
|
@ -997,6 +1002,7 @@ Note that it could be required to flush response cache after importing historica
|
|||
### How to import data in Prometheus exposition format
|
||||
|
||||
VictoriaMetrics accepts data in [Prometheus exposition format](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format)
|
||||
and in [OpenMetrics format](https://github.com/OpenObservability/OpenMetrics/blob/master/specification/OpenMetrics.md)
|
||||
via `/api/v1/import/prometheus` path. For example, the following line imports a single line in Prometheus exposition format into VictoriaMetrics:
|
||||
|
||||
```bash
|
||||
|
@ -1449,7 +1455,7 @@ The collected profiles may be analyzed with [go tool pprof](https://github.com/g
|
|||
* [netdata](https://github.com/netdata/netdata) can push data into VictoriaMetrics via `Prometheus remote_write API`.
|
||||
See [these docs](https://github.com/netdata/netdata#integrations).
|
||||
* [go-graphite/carbonapi](https://github.com/go-graphite/carbonapi) can use VictoriaMetrics as time series backend.
|
||||
See [this example](https://github.com/go-graphite/carbonapi/blob/master/cmd/carbonapi/carbonapi.example.prometheus.yaml).
|
||||
See [this example](https://github.com/go-graphite/carbonapi/blob/main/cmd/carbonapi/carbonapi.example.victoriametrics.yaml).
|
||||
* [Ansible role for installing single-node VictoriaMetrics](https://github.com/dreamteam-gg/ansible-victoriametrics-role).
|
||||
* [Ansible role for installing cluster VictoriaMetrics](https://github.com/Slapper/ansible-victoriametrics-cluster-role).
|
||||
* [Snap package for VictoriaMetrics](https://snapcraft.io/victoriametrics).
|
||||
|
|
|
@ -56,12 +56,25 @@ users:
|
|||
|
||||
# The user for inserting Prometheus data into VictoriaMetrics cluster under account 42
|
||||
# See https://victoriametrics.github.io/Cluster-VictoriaMetrics.html#url-format
|
||||
# All the reuqests to http://vmauth:8427 with the given Basic Auth (username:password)
|
||||
# All the requests to http://vmauth:8427 with the given Basic Auth (username:password)
|
||||
# will be routed to http://vminsert:8480/insert/42/prometheus .
|
||||
# For example, http://vmauth:8427/api/v1/write is routed to http://vminsert:8480/insert/42/prometheus/api/v1/write
|
||||
- username: "cluster-insert-account-42"
|
||||
password: "***"
|
||||
url_prefix: "http://vminsert:8480/insert/42/prometheus"
|
||||
|
||||
|
||||
# A single user for querying and inserting data:
|
||||
# - Requests to http://vmauth:8427/api/v1/query or http://vmauth:8427/api/v1/query_range
|
||||
# are routed to http://vmselect:8481/select/42/prometheus.
|
||||
# For example, http://vmauth:8427/api/v1/query is routed to http://vmselect:8480/select/42/prometheus/api/v1/query
|
||||
# - Requests to http://vmauth:8427/api/v1/write are routed to http://vminsert:8480/insert/42/prometheus/api/v1/write
|
||||
- username: "foobar"
|
||||
url_map:
|
||||
- src_paths: ["/api/v1/query", "/api/v1/query_range"]
|
||||
url_prefix: "http://vmselect:8481/select/42/prometheus"
|
||||
- src_paths: ["/api/v1/write"]
|
||||
url_prefix: "http://vminsert:8480/insert/42/prometheus"
|
||||
```
|
||||
|
||||
The config may contain `%{ENV_VAR}` placeholders, which are substituted by the corresponding `ENV_VAR` environment variable values.
|
||||
|
|
|
@ -1,4 +1,6 @@
|
|||
# vmctl - Victoria metrics command-line tool
|
||||
# vmctl
|
||||
|
||||
Victoria metrics command-line tool
|
||||
|
||||
Features:
|
||||
- [x] Prometheus: migrate data from Prometheus to VictoriaMetrics using snapshot API
|
||||
|
|
go.mod (19 lines changed)
@ -1,16 +1,16 @@
|
|||
module github.com/VictoriaMetrics/VictoriaMetrics
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.75.0 // indirect
|
||||
cloud.google.com/go/storage v1.12.0
|
||||
cloud.google.com/go v0.76.0 // indirect
|
||||
cloud.google.com/go/storage v1.13.0
|
||||
github.com/VictoriaMetrics/fastcache v1.5.7
|
||||
|
||||
// Do not use the original github.com/valyala/fasthttp because of issues
|
||||
// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
|
||||
github.com/VictoriaMetrics/fasthttp v1.0.12
|
||||
github.com/VictoriaMetrics/metrics v1.12.3
|
||||
github.com/VictoriaMetrics/metrics v1.13.1
|
||||
github.com/VictoriaMetrics/metricsql v0.10.0
|
||||
github.com/aws/aws-sdk-go v1.37.1
|
||||
github.com/aws/aws-sdk-go v1.37.7
|
||||
github.com/cespare/xxhash/v2 v2.1.1
|
||||
github.com/cheggaaa/pb/v3 v3.0.5
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
|
||||
|
@ -22,7 +22,7 @@ require (
|
|||
github.com/mattn/go-runewidth v0.0.10 // indirect
|
||||
github.com/oklog/ulid v1.3.1
|
||||
github.com/prometheus/client_golang v1.9.0 // indirect
|
||||
github.com/prometheus/procfs v0.3.0 // indirect
|
||||
github.com/prometheus/procfs v0.4.1 // indirect
|
||||
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
|
||||
github.com/rivo/uniseg v0.2.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
|
@ -34,15 +34,10 @@ require (
|
|||
github.com/valyala/histogram v1.1.2
|
||||
github.com/valyala/quicktemplate v1.6.3
|
||||
go.opencensus.io v0.22.6 // indirect
|
||||
golang.org/x/mod v0.4.1 // indirect
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
|
||||
golang.org/x/text v0.3.5 // indirect
|
||||
golang.org/x/tools v0.1.0 // indirect
|
||||
google.golang.org/api v0.38.0
|
||||
google.golang.org/genproto v0.0.0-20210201151548-94839c025ad4 // indirect
|
||||
google.golang.org/grpc v1.35.0 // indirect
|
||||
google.golang.org/api v0.39.0
|
||||
google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
)
|
||||
|
||||
|
|
go.sum (43 lines changed)
@ -15,11 +15,11 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
|
|||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go v0.66.0/go.mod h1:dgqGAjKCDxyhGTtC9dAREQGUJpkceNm1yt590Qno0Ko=
|
||||
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
|
||||
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
|
||||
cloud.google.com/go v0.75.0 h1:XgtDnVJRCPEUG21gjFiRPz4zI1Mjg16R+NYQjfmU4XY=
|
||||
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
|
||||
cloud.google.com/go v0.76.0 h1:Ckw+E/QYZgd/5bpI4wz4h6f+jmpvh9S9uSrKNnbicJI=
|
||||
cloud.google.com/go v0.76.0/go.mod h1:r9EvIAvLrunusnetGdQ50M/gKui1x3zdGW/VELGkdpw=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
|
@ -38,8 +38,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
|
|||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
cloud.google.com/go/storage v1.12.0 h1:4y3gHptW1EHVtcPAVE0eBBlFuGqEejTTG3KdIE0lUX4=
|
||||
cloud.google.com/go/storage v1.12.0/go.mod h1:fFLk2dp2oAhDz8QFKwqrjdJvxSp/W2g7nillojlL5Ho=
|
||||
cloud.google.com/go/storage v1.13.0 h1:amPvhCOI+Hltp6rPu+62YdwhIrjf+34PKVAL4HwgYwk=
|
||||
cloud.google.com/go/storage v1.13.0/go.mod h1:pqFyBUK3zZqMIIU5+8NaZq6/Ma3ClgUg9Hv5jfuJnvo=
|
||||
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-sdk-for-go v48.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
|
@ -85,8 +85,8 @@ github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6Ro
|
|||
github.com/VictoriaMetrics/fasthttp v1.0.12 h1:Ag0E119yrH4BTxVyjKD9TeiSImtG9bUcg/stItLJhSE=
|
||||
github.com/VictoriaMetrics/fasthttp v1.0.12/go.mod h1:3SeUL4zwB/p/a9aEeRc6gdlbrtNHXBJR6N376EgiSHU=
|
||||
github.com/VictoriaMetrics/metrics v1.12.2/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
|
||||
github.com/VictoriaMetrics/metrics v1.12.3 h1:Fe6JHC6MSEKa+BtLhPN8WIvS+HKPzMc2evEpNeCGy7I=
|
||||
github.com/VictoriaMetrics/metrics v1.12.3/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
|
||||
github.com/VictoriaMetrics/metrics v1.13.1 h1:1S9QrbXLPrcDBYLiDNIqWk9AC/lk5Ptk8eIjDIFFDsQ=
|
||||
github.com/VictoriaMetrics/metrics v1.13.1/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
|
||||
github.com/VictoriaMetrics/metricsql v0.10.0 h1:45BARAP2shaL/5p67Hvz+YrWUbr0X0VCy9t+gvdIm8o=
|
||||
github.com/VictoriaMetrics/metricsql v0.10.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
|
||||
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
|
||||
|
@ -123,8 +123,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ
|
|||
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
|
||||
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.37.1 h1:BTHmuN+gzhxkvU9sac2tZvaY0gV9ihbHw+KxZOecYvY=
|
||||
github.com/aws/aws-sdk-go v1.37.1/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.37.7 h1:vfald/ssuWaA2HgJ9DrieVVXVE9eD0Kly/9kl0hofbE=
|
||||
github.com/aws/aws-sdk-go v1.37.7/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
|
@ -397,11 +397,11 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
|
|||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201117184057-ae444373da19/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
|
@ -680,8 +680,8 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
|
|||
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.3.0 h1:Uehi/mxLK0eiUc0H0++5tpMGTexB8wZ598MIgU8VpDM=
|
||||
github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.4.1 h1:a4oCTNJdGpE6eD4j1mypyS2ZXLFXo8wEVgUabL47Xr0=
|
||||
github.com/prometheus/procfs v0.4.1/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9 h1:F2A86PGVYqn3P7oWbrSmSlJHae9y6wwpAdoWb/pZi6Q=
|
||||
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9/go.mod h1:1MDE/bXgu4gqd5w/otko6WQpXZX9vu8QX4KbitCmaPg=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
|
@ -904,7 +904,6 @@ golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/
|
|||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
|
@ -920,6 +919,7 @@ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4Iltr
|
|||
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210113205817-d3ed898aa8a3/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c h1:HiAZXo96zOhVhtFHchj/ojzoxCFiPrp9/j0GtS38V3g=
|
||||
golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -990,7 +990,6 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -1077,10 +1076,7 @@ golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roY
|
|||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200828161849-5deb26317202/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
|
||||
golang.org/x/tools v0.0.0-20200915173823-2db8f0ff891c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
|
||||
golang.org/x/tools v0.0.0-20200918232735-d647fc253266/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
|
||||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201119054027-25dc3e1ccc3c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
|
@ -1117,12 +1113,11 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
|
|||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/api v0.31.0/go.mod h1:CL+9IBCa2WWU6gRuBWaKqGWLFFwbEUXkfeMkHLQWYWo=
|
||||
google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
|
||||
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
|
||||
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
|
||||
google.golang.org/api v0.38.0 h1:vDyWk6eup8eQAidaZ31sNWIn8tZEL8qpbtGkBD4ytQo=
|
||||
google.golang.org/api v0.38.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
|
||||
google.golang.org/api v0.39.0 h1:zHCTXf0NeDdKTgcSQpT+ZflWAqHsEp1GmdpxW09f3YM=
|
||||
google.golang.org/api v0.39.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
@ -1165,17 +1160,16 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
|
|||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200831141814-d751682dd103/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200914193844-75d14daec038/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200921151605-7abf4a1a14d5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210201151548-94839c025ad4 h1:HPkKL4eEh/nemF/FRzYMrFsAh1ZPm5t8NqKBI/Ejlg0=
|
||||
google.golang.org/genproto v0.0.0-20210201151548-94839c025ad4/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210202153253-cf70463f6119/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210203152818-3206188e46ba/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea h1:N98SvVh7Hdle2lgUVFuIkf0B3u29CUakMUQa7Hwz8Wc=
|
||||
google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
|
@ -1194,7 +1188,6 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
|
|||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
|
||||
|
|
|
@ -41,7 +41,7 @@ func updateGOMAXPROCSToCPUQuota() {
}

func getCPUQuota() float64 {
    quotaUS, err := readInt64("/sys/fs/cgroup/cpu/cpu.cfs_quota_us", "cat /sys/fs/cgroup/cpu$(cat /proc/self/cgroup | grep cpu, | cut -d: -f3)/cpu.cfs_quota_us")
    quotaUS, err := getCPUStat("cpu.cfs_quota_us")
    if err != nil {
        return 0
    }
@ -50,13 +50,17 @@ func getCPUQuota() float64 {
        // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/685#issuecomment-674423728
        return getOnlineCPUCount()
    }
    periodUS, err := readInt64("/sys/fs/cgroup/cpu/cpu.cfs_period_us", "cat /sys/fs/cgroup/cpu$(cat /proc/self/cgroup | grep cpu, | cut -d: -f3)/cpu.cfs_period_us")
    periodUS, err := getCPUStat("cpu.cfs_period_us")
    if err != nil {
        return 0
    }
    return float64(quotaUS) / float64(periodUS)
}

func getCPUStat(statName string) (int64, error) {
    return getStatGeneric(statName, "/sys/fs/cgroup/cpu", "/proc/self/cgroup", "cpu,")
}

func getOnlineCPUCount() float64 {
    // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/685#issuecomment-674423728
    data, err := ioutil.ReadFile("/sys/devices/system/cpu/online")
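For readers following the cgroup refactoring, a minimal standalone sketch of the same CFS quota arithmetic is shown below. The sysfs paths match the ones used above; the ceil-and-fallback policy is an assumption for illustration, not necessarily what updateGOMAXPROCSToCPUQuota does.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"math"
	"runtime"
	"strconv"
	"strings"
)

// readCgroupInt64 reads a single integer value from a cgroup v1 file.
func readCgroupInt64(path string) (int64, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return 0, err
	}
	return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
}

// cpuLimit returns the number of CPUs the container may use,
// falling back to the host CPU count when no quota is set.
func cpuLimit() int {
	quota, err := readCgroupInt64("/sys/fs/cgroup/cpu/cpu.cfs_quota_us")
	if err != nil || quota <= 0 {
		// quota is -1 when unlimited; this mirrors the getOnlineCPUCount() fallback above.
		return runtime.NumCPU()
	}
	period, err := readCgroupInt64("/sys/fs/cgroup/cpu/cpu.cfs_period_us")
	if err != nil || period <= 0 {
		return runtime.NumCPU()
	}
	return int(math.Ceil(float64(quota) / float64(period)))
}

func main() {
	fmt.Println("usable CPUs:", cpuLimit())
}
```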
@ -1,5 +1,9 @@
package cgroup

import (
    "strconv"
)

// GetMemoryLimit returns cgroup memory limit
func GetMemoryLimit() int64 {
    // Try determining the amount of memory inside docker container.
@ -8,24 +12,36 @@ func GetMemoryLimit() int64 {
    // Read memory limit according to https://unix.stackexchange.com/questions/242718/how-to-find-out-how-much-memory-lxc-container-is-allowed-to-consume
    // This should properly determine the limit inside lxc container.
    // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/84
    n, err := readInt64("/sys/fs/cgroup/memory/memory.limit_in_bytes", "cat /sys/fs/cgroup/memory$(cat /proc/self/cgroup | grep memory | cut -d: -f3)/memory.limit_in_bytes")
    n, err := getMemStat("memory.limit_in_bytes")
    if err != nil {
        return 0
    }
    return n
}

func getMemStat(statName string) (int64, error) {
    return getStatGeneric(statName, "/sys/fs/cgroup/memory", "/proc/self/cgroup", "memory")
}

// GetHierarchicalMemoryLimit returns hierarchical memory limit
// https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt
func GetHierarchicalMemoryLimit() int64 {
    // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/699
    n, err := readInt64FromCommand("cat /sys/fs/cgroup/memory/memory.stat | grep hierarchical_memory_limit | cut -d' ' -f 2")
    if err == nil {
        return n
    }
    n, err = readInt64FromCommand(
        "cat /sys/fs/cgroup/memory$(cat /proc/self/cgroup | grep memory | cut -d: -f3)/memory.stat | grep hierarchical_memory_limit | cut -d' ' -f 2")
    n, err := getHierarchicalMemoryLimit("/sys/fs/cgroup/memory", "/proc/self/cgroup")
    if err != nil {
        return 0
    }
    return n
}

func getHierarchicalMemoryLimit(sysfsPrefix, cgroupPath string) (int64, error) {
    data, err := getFileContents("memory.stat", sysfsPrefix, cgroupPath, "memory")
    if err != nil {
        return 0, err
    }
    memStat, err := grepFirstMatch(data, "hierarchical_memory_limit", 1, " ")
    if err != nil {
        return 0, err
    }
    return strconv.ParseInt(memStat, 10, 64)
}
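A possible consumer of these helpers, as a hedged sketch: sizing an in-process cache from the container memory limit. The 25% fraction and the fallback order are illustrative assumptions; how VictoriaMetrics itself applies the limits is not shown in this hunk.

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
)

// cacheSizeBytes picks a cache budget from the cgroup memory limits.
// The 25% fraction and the fallback chain are illustrative assumptions.
func cacheSizeBytes(totalRAM int64) int64 {
	limit := cgroup.GetMemoryLimit()
	if limit <= 0 || limit > totalRAM {
		// memory.limit_in_bytes reports a huge value when no limit is set,
		// so fall back to the hierarchical limit, then to host RAM.
		if hl := cgroup.GetHierarchicalMemoryLimit(); hl > 0 && hl < totalRAM {
			limit = hl
		} else {
			limit = totalRAM
		}
	}
	return limit / 4
}

func main() {
	const hostRAM = 8 << 30 // assume 8GiB of host RAM for the example
	fmt.Println("cache budget:", cacheSizeBytes(hostRAM))
}
```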
34
lib/cgroup/mem_test.go
Normal file
|
@ -0,0 +1,34 @@
|
|||
package cgroup
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGetHierarchicalMemoryLimitSuccess(t *testing.T) {
|
||||
f := func(sysPath, cgroupPath string, want int64) {
|
||||
t.Helper()
|
||||
got, err := getHierarchicalMemoryLimit(sysPath, cgroupPath)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if got != want {
|
||||
t.Fatalf("unexpected result, got: %d, want %d", got, want)
|
||||
}
|
||||
}
|
||||
f("testdata/", "testdata/self/cgroup", 16)
|
||||
f("testdata/cgroup", "testdata/self/cgroup", 120)
|
||||
}
|
||||
|
||||
func TestGetHierarchicalMemoryLimitFailure(t *testing.T) {
|
||||
f := func(sysPath, cgroupPath string) {
|
||||
t.Helper()
|
||||
got, err := getHierarchicalMemoryLimit(sysPath, cgroupPath)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
if got != 0 {
|
||||
t.Fatalf("unexpected result, got: %d, want 0", got)
|
||||
}
|
||||
}
|
||||
f("testdata/", "testdata/none_existing_folder")
|
||||
}
|
1
lib/cgroup/testdata/cgroup/cpu.cfs_period_us
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
500000
|
1
lib/cgroup/testdata/cgroup/cpu.cfs_quota_us
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
10
|
1
lib/cgroup/testdata/cgroup/memory.limit_in_bytes
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
523372036854771712
|
31
lib/cgroup/testdata/cgroup/memory.stat
vendored
Normal file
|
@ -0,0 +1,31 @@
|
|||
rss 2
|
||||
rss_huge 3
|
||||
mapped_file 4
|
||||
dirty 5
|
||||
writeback 6
|
||||
pgpgin 7
|
||||
pgpgout 8
|
||||
pgfault 9
|
||||
pgmajfault 10
|
||||
inactive_anon 11
|
||||
active_anon 12
|
||||
inactive_file 13
|
||||
active_file 14
|
||||
unevictable 15
|
||||
hierarchical_memory_limit 120
|
||||
hierarchical_memsw_limit 17
|
||||
total_cache 18
|
||||
total_rss 19
|
||||
total_rss_huge 20
|
||||
total_mapped_file 21
|
||||
total_dirty 22
|
||||
total_writeback 23
|
||||
total_pgpgin 24
|
||||
total_pgpgout 25
|
||||
total_pgfault 26
|
||||
total_pgmajfault 27
|
||||
total_inactive_anon 28
|
||||
total_active_anon 29
|
||||
total_inactive_file 30
|
||||
total_active_file 31
|
||||
total_unevictable 32
|
|
@ -0,0 +1 @@
|
|||
100000
|
|
@ -0,0 +1 @@
|
|||
-1
|
|
@ -0,0 +1 @@
|
|||
9223372036854771712
|
|
@ -0,0 +1,31 @@
|
|||
rss 2
|
||||
rss_huge 3
|
||||
mapped_file 4
|
||||
dirty 5
|
||||
writeback 6
|
||||
pgpgin 7
|
||||
pgpgout 8
|
||||
pgfault 9
|
||||
pgmajfault 10
|
||||
inactive_anon 11
|
||||
active_anon 12
|
||||
inactive_file 13
|
||||
active_file 14
|
||||
unevictable 15
|
||||
hierarchical_memory_limit 16
|
||||
hierarchical_memsw_limit 17
|
||||
total_cache 18
|
||||
total_rss 19
|
||||
total_rss_huge 20
|
||||
total_mapped_file 21
|
||||
total_dirty 22
|
||||
total_writeback 23
|
||||
total_pgpgin 24
|
||||
total_pgpgout 25
|
||||
total_pgfault 26
|
||||
total_pgmajfault 27
|
||||
total_inactive_anon 28
|
||||
total_active_anon 29
|
||||
total_inactive_file 30
|
||||
total_active_file 31
|
||||
total_unevictable 32
|
13
lib/cgroup/testdata/self/cgroup
vendored
Normal file
|
@ -0,0 +1,13 @@
|
|||
12:perf_event:/docker/74c9abf42b88b9a35b1b56061b08303e56fd1707fe5c5b4df93324dedb36b5db
|
||||
11:rdma:/
|
||||
10:pids:/docker/74c9abf42b88b9a35b1b56061b08303e56fd1707fe5c5b4df93324dedb36b5db
|
||||
9:freezer:/docker/74c9abf42b88b9a35b1b56061b08303e56fd1707fe5c5b4df93324dedb36b5db
|
||||
8:memory:/docker/74c9abf42b88b9a35b1b56061b08303e56fd1707fe5c5b4df93324dedb36b5db
|
||||
7:devices:/docker/74c9abf42b88b9a35b1b56061b08303e56fd1707fe5c5b4df93324dedb36b5db
|
||||
6:cpuset:/docker/74c9abf42b88b9a35b1b56061b08303e56fd1707fe5c5b4df93324dedb36b5db
|
||||
5:hugetlb:/docker/74c9abf42b88b9a35b1b56061b08303e56fd1707fe5c5b4df93324dedb36b5db
|
||||
4:net_cls,net_prio:/docker/74c9abf42b88b9a35b1b56061b08303e56fd1707fe5c5b4df93324dedb36b5db
|
||||
3:blkio:/docker/74c9abf42b88b9a35b1b56061b08303e56fd1707fe5c5b4df93324dedb36b5db
|
||||
2:cpu,cpuacct:/docker/74c9abf42b88b9a35b1b56061b08303e56fd1707fe5c5b4df93324dedb36b5db
|
||||
1:name=systemd:/docker/74c9abf42b88b9a35b1b56061b08303e56fd1707fe5c5b4df93324dedb36b5db
|
||||
0::/system.slice/containerd.service
|
|
@ -1,27 +1,58 @@
package cgroup

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "os/exec"
    "path"
    "strconv"
    "strings"
)

func readInt64(path, altCommand string) (int64, error) {
    data, err := ioutil.ReadFile(path)
    if err == nil {
        data = bytes.TrimSpace(data)
        return strconv.ParseInt(string(data), 10, 64)
    }
    return readInt64FromCommand(altCommand)
}

func readInt64FromCommand(command string) (int64, error) {
    cmd := exec.Command("/bin/sh", "-c", command)
    data, err := cmd.Output()
func getStatGeneric(statName, sysfsPrefix, cgroupPath, cgroupGrepLine string) (int64, error) {
    data, err := getFileContents(statName, sysfsPrefix, cgroupPath, cgroupGrepLine)
    if err != nil {
        return 0, err
    }
    data = bytes.TrimSpace(data)
    return strconv.ParseInt(string(data), 10, 64)
    n, err := strconv.ParseInt(data, 10, 64)
    if err != nil {
        return 0, err
    }
    return n, nil
}

func getFileContents(statName, sysfsPrefix, cgroupPath, cgroupGrepLine string) (string, error) {
    filepath := path.Join(sysfsPrefix, statName)
    data, err := ioutil.ReadFile(filepath)
    if err == nil {
        return string(data), nil
    }
    cgroupData, err := ioutil.ReadFile(cgroupPath)
    if err != nil {
        return "", err
    }
    subPath, err := grepFirstMatch(string(cgroupData), cgroupGrepLine, 2, ":")
    if err != nil {
        return "", err
    }
    filepath = path.Join(sysfsPrefix, subPath, statName)
    data, err = ioutil.ReadFile(filepath)
    if err != nil {
        return "", err
    }
    return string(data), nil
}

// grepFirstMatch searches match line at data and returns item from it by index with given delimiter.
func grepFirstMatch(data string, match string, index int, delimiter string) (string, error) {
    lines := strings.Split(string(data), "\n")
    for _, s := range lines {
        if !strings.Contains(s, match) {
            continue
        }
        parts := strings.Split(s, delimiter)
        if index < len(parts) {
            return strings.TrimSpace(parts[index]), nil
        }
    }
    return "", fmt.Errorf("cannot find %q in %q", match, data)
}
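To make the fallback path in getFileContents concrete, here is a small self-contained sketch of the same idea: find the controller line in /proc/self/cgroup, split it on ":", take the third field and join it into the sysfs path. The sample line is copied from the testdata file added below; the helper name is made up.

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// resolveCgroupFile mimics the fallback branch of getFileContents:
// when /sys/fs/cgroup/<controller>/<statName> does not exist directly,
// the per-container sub-path is taken from /proc/self/cgroup.
func resolveCgroupFile(sysfsPrefix, procSelfCgroup, controller, statName string) (string, error) {
	for _, line := range strings.Split(procSelfCgroup, "\n") {
		if !strings.Contains(line, controller) {
			continue
		}
		parts := strings.Split(line, ":")
		if len(parts) < 3 {
			continue
		}
		// parts[2] is the cgroup path, e.g. /docker/74c9abf42b88...
		return path.Join(sysfsPrefix, parts[2], statName), nil
	}
	return "", fmt.Errorf("controller %q not found", controller)
}

func main() {
	const procSelfCgroup = "8:memory:/docker/74c9abf42b88b9a35b1b56061b08303e56fd1707fe5c5b4df93324dedb36b5db"
	p, err := resolveCgroupFile("/sys/fs/cgroup/memory", procSelfCgroup, "memory", "memory.limit_in_bytes")
	if err != nil {
		panic(err)
	}
	fmt.Println(p)
	// Prints roughly: /sys/fs/cgroup/memory/docker/74c9.../memory.limit_in_bytes
}
```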
40
lib/cgroup/util_test.go
Normal file
|
@ -0,0 +1,40 @@
|
|||
package cgroup
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGetStatGenericSuccess(t *testing.T) {
|
||||
f := func(statName, sysfsPrefix, cgroupPath, cgroupGrepLine string, want int64) {
|
||||
t.Helper()
|
||||
got, err := getStatGeneric(statName, sysfsPrefix, cgroupPath, cgroupGrepLine)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if got != want {
|
||||
t.Fatalf("unexpected result, got: %d, want %d", got, want)
|
||||
}
|
||||
}
|
||||
f("cpu.cfs_quota_us", "testdata/", "testdata/self/cgroup", "cpu,", -1)
|
||||
f("cpu.cfs_quota_us", "testdata/cgroup", "testdata/self/cgroup", "cpu,", 10)
|
||||
f("cpu.cfs_period_us", "testdata/", "testdata/self/cgroup", "cpu,", 100000)
|
||||
f("cpu.cfs_period_us", "testdata/cgroup", "testdata/self/cgroup", "cpu,", 500000)
|
||||
f("memory.limit_in_bytes", "testdata/", "testdata/self/cgroup", "memory", 9223372036854771712)
|
||||
f("memory.limit_in_bytes", "testdata/cgroup", "testdata/self/cgroup", "memory", 523372036854771712)
|
||||
}
|
||||
|
||||
func TestGetStatGenericFailure(t *testing.T) {
|
||||
f := func(statName, sysfsPrefix, cgroupPath, cgroupGrepLine string) {
|
||||
t.Helper()
|
||||
got, err := getStatGeneric(statName, sysfsPrefix, cgroupPath, cgroupGrepLine)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
if got != 0 {
|
||||
t.Fatalf("unexpected result, got: %d, want 0", got)
|
||||
}
|
||||
}
|
||||
f("cpu.cfs_quota_us", "testdata/", "testdata/missing_folder", "cpu,")
|
||||
f("cpu.cfs_period_us", "testdata/", "testdata/missing_folder", "cpu,")
|
||||
f("memory.limit_in_bytes", "testdata/", "testdata/none_existing_folder", "memory")
|
||||
}
|
|
@ -1,34 +0,0 @@
|
|||
// +build cgo
|
||||
|
||||
package fs
|
||||
|
||||
// #cgo CFLAGS: -O3
|
||||
//
|
||||
// #include <stdint.h> // for uintptr_t
|
||||
// #include <string.h> // for memcpy
|
||||
//
|
||||
// // The memcpy_wrapper allows avoiding memory allocations during calls from Go.
|
||||
// // See https://github.com/golang/go/issues/24450 .
|
||||
// static void memcpy_wrapper(uintptr_t dst, uintptr_t src, size_t n) {
|
||||
// memcpy((void*)dst, (void*)src, n);
|
||||
// }
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// copyMmap copies len(dst) bytes from src to dst.
|
||||
func copyMmap(dst, src []byte) {
|
||||
// Copy data from mmap'ed src via cgo call in order to protect from goroutine stalls
|
||||
// when the copied data isn't available in RAM, so the OS triggers reading the data from file.
|
||||
// See https://medium.com/@valyala/mmap-in-go-considered-harmful-d92a25cb161d for details.
|
||||
dstPtr := C.uintptr_t(uintptr(unsafe.Pointer(&dst[0])))
|
||||
srcPtr := C.uintptr_t(uintptr(unsafe.Pointer(&src[0])))
|
||||
C.memcpy_wrapper(dstPtr, srcPtr, C.size_t(len(dst)))
|
||||
|
||||
// Prevent from GC'ing src or dst during C.memcpy_wrapper call.
|
||||
runtime.KeepAlive(src)
|
||||
runtime.KeepAlive(dst)
|
||||
}
|
|
@ -1,12 +0,0 @@
|
|||
// +build !cgo
|
||||
|
||||
package fs
|
||||
|
||||
// copyMmap copies len(dst) bytes from src to dst.
|
||||
func copyMmap(dst, src []byte) {
|
||||
// This may lead to goroutines stalls when the copied data isn't available in RAM.
|
||||
// In this case the OS triggers reading the data from file.
|
||||
// See https://medium.com/@valyala/mmap-in-go-considered-harmful-d92a25cb161d for details.
|
||||
// TODO: fix this
|
||||
copy(dst, src)
|
||||
}
|
43
lib/fs/fs.go
@ -3,6 +3,7 @@ package fs
import (
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "regexp"
@ -257,6 +258,48 @@ func SymlinkRelative(srcPath, dstPath string) error {
    return os.Symlink(srcPathRel, dstPath)
}

// CopyDirectory copies all the files in srcPath to dstPath.
func CopyDirectory(srcPath, dstPath string) error {
    fis, err := ioutil.ReadDir(srcPath)
    if err != nil {
        return err
    }
    if err := MkdirAllIfNotExist(dstPath); err != nil {
        return err
    }
    for _, fi := range fis {
        if !fi.Mode().IsRegular() {
            // Skip non-files
            continue
        }
        src := filepath.Join(srcPath, fi.Name())
        dst := filepath.Join(dstPath, fi.Name())
        if err := copyFile(src, dst); err != nil {
            return err
        }
    }
    MustSyncPath(dstPath)
    return nil
}

func copyFile(srcPath, dstPath string) error {
    src, err := os.Open(srcPath)
    if err != nil {
        return err
    }
    defer MustClose(src)
    dst, err := os.Create(dstPath)
    if err != nil {
        return err
    }
    defer MustClose(dst)
    if _, err := io.Copy(dst, src); err != nil {
        return err
    }
    MustSyncPath(dstPath)
    return nil
}

// ReadFullData reads len(data) bytes from r.
func ReadFullData(r io.Reader, data []byte) error {
    n, err := io.ReadFull(r, data)
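A hedged usage sketch for the new helper: copying a snapshot directory to a backup location. As the code above shows, CopyDirectory copies regular files only (sub-directories and symlinks are skipped) and fsyncs the destination directory when done. The paths in the example are invented.

```go
package main

import (
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
)

func main() {
	// Hypothetical paths; fs.CopyDirectory is the exported helper added in the hunk above.
	src := "/var/lib/victoria-metrics/snapshots/20210207"
	dst := "/backups/victoria-metrics/20210207"
	if err := fs.CopyDirectory(src, dst); err != nil {
		log.Fatalf("cannot copy %q to %q: %s", src, dst, err)
	}
	log.Printf("copied %q to %q", src, dst)
}
```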
@ -4,9 +4,6 @@ import (
|
|||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
|
@ -17,6 +14,7 @@ var disableMmap = flag.Bool("fs.disableMmap", is32BitPtr, "Whether to use pread(
|
|||
"By default mmap() is used for 64-bit arches and pread() is used for 32-bit arches, since they cannot read data files bigger than 2^32 bytes in memory. "+
|
||||
"mmap() is usually faster for reading small data chunks than pread()")
|
||||
|
||||
// Disable mmap for architectures with 32-bit pointers in order to be able to work with files exceeding 2^32 bytes.
|
||||
const is32BitPtr = (^uintptr(0) >> 32) == 0
|
||||
|
||||
// MustReadAtCloser is rand-access read interface.
|
||||
|
@ -32,18 +30,6 @@ type MustReadAtCloser interface {
|
|||
type ReaderAt struct {
|
||||
f *os.File
|
||||
mmapData []byte
|
||||
|
||||
// pageCacheBitmap holds a bitmap for recently touched pages in mmapData.
|
||||
// This bitmap allows using simple copy() instead of copyMmap() for reading recently touched pages,
|
||||
// which is up to 4x faster when reading small chunks of data via MustReadAt.
|
||||
pageCacheBitmap atomic.Value
|
||||
pageCacheBitmapWG sync.WaitGroup
|
||||
|
||||
stopCh chan struct{}
|
||||
}
|
||||
|
||||
type pageCacheBitmap struct {
|
||||
m []uint64
|
||||
}
|
||||
|
||||
// MustReadAt reads len(p) bytes at off from r.
|
||||
|
@ -54,11 +40,7 @@ func (r *ReaderAt) MustReadAt(p []byte, off int64) {
|
|||
if off < 0 {
|
||||
logger.Panicf("off=%d cannot be negative", off)
|
||||
}
|
||||
end := off + int64(len(p))
|
||||
if len(r.mmapData) == 0 || (len(p) > 8*1024 && !r.isInPageCache(off, end)) {
|
||||
// Read big blocks directly from file.
|
||||
// This could be faster than reading these blocks from mmap,
|
||||
// since it triggers less page faults.
|
||||
if len(r.mmapData) == 0 {
|
||||
n, err := r.f.ReadAt(p, off)
|
||||
if err != nil {
|
||||
logger.Panicf("FATAL: cannot read %d bytes at offset %d of file %q: %s", len(p), off, r.f.Name(), err)
|
||||
|
@ -66,77 +48,21 @@ func (r *ReaderAt) MustReadAt(p []byte, off int64) {
|
|||
if n != len(p) {
|
||||
logger.Panicf("FATAL: unexpected number of bytes read; got %d; want %d", n, len(p))
|
||||
}
|
||||
if len(r.mmapData) > 0 {
|
||||
r.markInPageCache(off, end)
|
||||
}
|
||||
} else {
|
||||
if off > int64(len(r.mmapData)-len(p)) {
|
||||
logger.Panicf("off=%d is out of allowed range [0...%d] for len(p)=%d", off, len(r.mmapData)-len(p), len(p))
|
||||
}
|
||||
src := r.mmapData[off:]
|
||||
if r.isInPageCache(off, end) {
|
||||
// It is safe copying the data with copy(), since it is likely it is in the page cache.
|
||||
// This is up to 4x faster than copyMmap() below.
|
||||
copy(p, src)
|
||||
} else {
|
||||
// The data may be missing in the page cache, so it is better to copy it via cgo trick
|
||||
// in order to avoid P stalls in Go runtime.
|
||||
// See https://medium.com/@valyala/mmap-in-go-considered-harmful-d92a25cb161d for details.
|
||||
copyMmap(p, src)
|
||||
r.markInPageCache(off, end)
|
||||
}
|
||||
// The copy() below may result in thread block as described at https://valyala.medium.com/mmap-in-go-considered-harmful-d92a25cb161d .
|
||||
// But production workload proved this is OK in most cases, so use it without fear :)
|
||||
copy(p, src)
|
||||
}
|
||||
readCalls.Inc()
|
||||
readBytes.Add(len(p))
|
||||
}
|
||||
|
||||
func (r *ReaderAt) isInPageCache(start, end int64) bool {
|
||||
if int64(len(r.mmapData))-end < 4096 {
|
||||
// If standard copy(dst, src) from Go may read beyond len(src), then this should help
|
||||
// fixing SIGBUS panic from https://github.com/VictoriaMetrics/VictoriaMetrics/issues/581
|
||||
return false
|
||||
}
|
||||
startBit := uint64(start) / pageSize
|
||||
endBit := uint64(end) / pageSize
|
||||
m := r.pageCacheBitmap.Load().(*pageCacheBitmap).m
|
||||
for startBit <= endBit {
|
||||
idx := startBit / 64
|
||||
off := startBit % 64
|
||||
if idx >= uint64(len(m)) {
|
||||
return true
|
||||
}
|
||||
n := atomic.LoadUint64(&m[idx])
|
||||
if (n>>off)&1 != 1 {
|
||||
return false
|
||||
}
|
||||
startBit++
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *ReaderAt) markInPageCache(start, end int64) {
|
||||
startBit := uint64(start) / pageSize
|
||||
endBit := uint64(end) / pageSize
|
||||
m := r.pageCacheBitmap.Load().(*pageCacheBitmap).m
|
||||
for startBit <= endBit {
|
||||
idx := startBit / 64
|
||||
off := startBit % 64
|
||||
n := atomic.LoadUint64(&m[idx])
|
||||
n |= 1 << off
|
||||
// It is OK if multiple concurrent goroutines store the same m[idx].
|
||||
atomic.StoreUint64(&m[idx], n)
|
||||
startBit++
|
||||
}
|
||||
}
|
||||
|
||||
// Assume page size is 4KB
|
||||
const pageSize = 4 * 1024
|
||||
|
||||
// MustClose closes r.
|
||||
func (r *ReaderAt) MustClose() {
|
||||
close(r.stopCh)
|
||||
r.pageCacheBitmapWG.Wait()
|
||||
|
||||
fname := r.f.Name()
|
||||
if len(r.mmapData) > 0 {
|
||||
if err := unix.Munmap(r.mmapData[:cap(r.mmapData)]); err != nil {
|
||||
|
@ -168,7 +94,6 @@ func MustOpenReaderAt(path string) *ReaderAt {
|
|||
}
|
||||
var r ReaderAt
|
||||
r.f = f
|
||||
r.stopCh = make(chan struct{})
|
||||
if !*disableMmap {
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
|
@ -176,16 +101,6 @@ func MustOpenReaderAt(path string) *ReaderAt {
|
|||
logger.Panicf("FATAL: error in fstat(%q): %s", path, err)
|
||||
}
|
||||
size := fi.Size()
|
||||
bm := &pageCacheBitmap{
|
||||
m: make([]uint64, 1+size/pageSize/64),
|
||||
}
|
||||
r.pageCacheBitmap.Store(bm)
|
||||
r.pageCacheBitmapWG.Add(1)
|
||||
go func() {
|
||||
defer r.pageCacheBitmapWG.Done()
|
||||
pageCacheBitmapCleaner(&r.pageCacheBitmap, r.stopCh)
|
||||
}()
|
||||
|
||||
data, err := mmapFile(f, size)
|
||||
if err != nil {
|
||||
MustClose(f)
|
||||
|
@ -197,23 +112,6 @@ func MustOpenReaderAt(path string) *ReaderAt {
|
|||
return &r
|
||||
}
|
||||
|
||||
func pageCacheBitmapCleaner(pcbm *atomic.Value, stopCh <-chan struct{}) {
|
||||
t := time.NewTicker(time.Minute)
|
||||
for {
|
||||
select {
|
||||
case <-stopCh:
|
||||
t.Stop()
|
||||
return
|
||||
case <-t.C:
|
||||
}
|
||||
bmOld := pcbm.Load().(*pageCacheBitmap)
|
||||
bm := &pageCacheBitmap{
|
||||
m: make([]uint64, len(bmOld.m)),
|
||||
}
|
||||
pcbm.Store(bm)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
readCalls = metrics.NewCounter(`vm_fs_read_calls_total`)
|
||||
readBytes = metrics.NewCounter(`vm_fs_read_bytes_total`)
|
||||
|
|
|
@ -19,6 +19,7 @@ var versionRe = regexp.MustCompile(`v\d+\.\d+\.\d+`)
|
|||
// WritePrometheusMetrics writes all the registered metrics to w in Prometheus exposition format.
|
||||
func WritePrometheusMetrics(w io.Writer) {
|
||||
metrics.WritePrometheus(w, true)
|
||||
metrics.WriteFDMetrics(w)
|
||||
|
||||
fmt.Fprintf(w, "vm_app_version{version=%q, short_version=%q} 1\n", buildinfo.Version,
|
||||
versionRe.FindString(buildinfo.Version))
|
||||
|
|
|
@ -17,10 +17,11 @@ import (
)

var (
    loggerLevel = flag.String("loggerLevel", "INFO", "Minimum level of errors to log. Possible values: INFO, WARN, ERROR, FATAL, PANIC")
    loggerFormat = flag.String("loggerFormat", "default", "Format for logs. Possible values: default, json")
    loggerOutput = flag.String("loggerOutput", "stderr", "Output for the logs. Supported values: stderr, stdout")
    loggerTimezone = flag.String("loggerTimezone", "UTC", "Timezone to use for timestamps in logs. Local timezone can be used")
    loggerLevel = flag.String("loggerLevel", "INFO", "Minimum level of errors to log. Possible values: INFO, WARN, ERROR, FATAL, PANIC")
    loggerFormat = flag.String("loggerFormat", "default", "Format for logs. Possible values: default, json")
    loggerOutput = flag.String("loggerOutput", "stderr", "Output for the logs. Supported values: stderr, stdout")
    loggerTimezone = flag.String("loggerTimezone", "UTC", "Timezone to use for timestamps in logs. Timezone must be a valid IANA Time Zone. "+
        "For example: America/New_York, Europe/Berlin, Etc/GMT+3 or Local")
    disableTimestamps = flag.Bool("loggerDisableTimestamps", false, "Whether to disable writing timestamps in logs")

    errorsPerSecondLimit = flag.Int("loggerErrorsPerSecondLimit", 0, "Per-second limit on the number of ERROR messages. If more than the given number of errors "+
@ -47,8 +48,7 @@ func Init() {
func initTimezone() {
    tz, err := time.LoadLocation(*loggerTimezone)
    if err != nil {
        log.Printf("cannot load timezone %q, so using UTC; error: %s", *loggerTimezone, err)
        tz = time.UTC
        log.Fatalf("cannot load timezone %q: %s", *loggerTimezone, err)
    }
    timezone = tz
}
9
lib/logger/tzdata.go
Normal file
@ -0,0 +1,9 @@
// +build go1.15

package logger

import (
    // This is needed for embedding tzdata into the binary, so `-loggerTimezone` could work in an app running on a scratch base Docker image.
    // The "time/tzdata" package appeared in Go 1.15 - see https://golang.org/doc/go1.15#time/tzdata
    _ "time/tzdata"
)
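A minimal sketch of the mechanism introduced here: the timezone name is resolved with time.LoadLocation, and the blank time/tzdata import (Go 1.15+) embeds the timezone database so the lookup also works in a scratch-based container without /usr/share/zoneinfo. The flag name mirrors -loggerTimezone, but the program itself is only illustrative.

```go
package main

import (
	"flag"
	"fmt"
	"log"
	"time"

	// Embed the timezone database so LoadLocation works without /usr/share/zoneinfo.
	_ "time/tzdata"
)

var tzName = flag.String("loggerTimezone", "UTC", "IANA timezone for log timestamps, e.g. Europe/Berlin")

func main() {
	flag.Parse()
	loc, err := time.LoadLocation(*tzName)
	if err != nil {
		// Mirrors the change above: fail fast instead of silently falling back to UTC.
		log.Fatalf("cannot load timezone %q: %s", *tzName, err)
	}
	fmt.Println(time.Now().In(loc).Format("2006-01-02T15:04:05.000Z0700"))
}
```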
@ -6,6 +6,7 @@ import (
    "sort"
    "strings"
    "sync"
    "unsafe"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
@ -28,6 +29,10 @@ type inmemoryBlock struct {
    items byteSliceSorter
}

func (ib *inmemoryBlock) SizeBytes() int {
    return int(unsafe.Sizeof(*ib)) + cap(ib.commonPrefix) + cap(ib.data) + cap(ib.items)*int(unsafe.Sizeof([]byte{}))
}

func (ib *inmemoryBlock) Reset() {
    ib.commonPrefix = ib.commonPrefix[:0]
    ib.data = ib.data[:0]
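The SizeBytes method above counts the struct header via unsafe.Sizeof plus the capacity (not the length) of each backing slice; for the slice-of-slices field only the slice headers are counted, since the item bytes already live inside data. A toy illustration of the same accounting pattern, with made-up names:

```go
package main

import (
	"fmt"
	"unsafe"
)

type block struct {
	data  []byte   // contiguous payload
	items [][]byte // sub-slices pointing into data
}

// sizeBytes estimates the memory held by b: the struct itself,
// the full capacity of the payload buffer, and one slice header
// per item (the item bytes are already counted inside data).
func (b *block) sizeBytes() int {
	return int(unsafe.Sizeof(*b)) + cap(b.data) + cap(b.items)*int(unsafe.Sizeof([]byte{}))
}

func main() {
	b := &block{data: make([]byte, 10, 64)}
	b.items = append(b.items, b.data[:5], b.data[5:10])
	fmt.Println("approx size in bytes:", b.sizeBytes())
}
```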
@ -6,6 +6,7 @@ import (
|
|||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/filestream"
|
||||
|
@ -136,6 +137,10 @@ type indexBlock struct {
|
|||
bhs []blockHeader
|
||||
}
|
||||
|
||||
func (idxb *indexBlock) SizeBytes() int {
|
||||
return cap(idxb.bhs) * int(unsafe.Sizeof(blockHeader{}))
|
||||
}
|
||||
|
||||
func getIndexBlock() *indexBlock {
|
||||
v := indexBlockPool.Get()
|
||||
if v == nil {
|
||||
|
@ -200,7 +205,7 @@ func (idxbc *indexBlockCache) MustClose() {
|
|||
|
||||
// cleaner periodically cleans least recently used items.
|
||||
func (idxbc *indexBlockCache) cleaner() {
|
||||
ticker := time.NewTicker(5 * time.Second)
|
||||
ticker := time.NewTicker(30 * time.Second)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
|
@ -216,8 +221,8 @@ func (idxbc *indexBlockCache) cleanByTimeout() {
|
|||
currentTime := fasttime.UnixTimestamp()
|
||||
idxbc.mu.Lock()
|
||||
for k, idxbe := range idxbc.m {
|
||||
// Delete items accessed more than a minute ago.
|
||||
if currentTime-atomic.LoadUint64(&idxbe.lastAccessTime) > 60 {
|
||||
// Delete items accessed more than two minutes ago.
|
||||
if currentTime-atomic.LoadUint64(&idxbe.lastAccessTime) > 2*60 {
|
||||
delete(idxbc.m, k)
|
||||
}
|
||||
}
|
||||
|
@ -276,6 +281,16 @@ func (idxbc *indexBlockCache) Len() uint64 {
|
|||
return uint64(n)
|
||||
}
|
||||
|
||||
func (idxbc *indexBlockCache) SizeBytes() uint64 {
|
||||
n := 0
|
||||
idxbc.mu.RLock()
|
||||
for _, e := range idxbc.m {
|
||||
n += e.idxb.SizeBytes()
|
||||
}
|
||||
idxbc.mu.RUnlock()
|
||||
return uint64(n)
|
||||
}
|
||||
|
||||
func (idxbc *indexBlockCache) Requests() uint64 {
|
||||
return atomic.LoadUint64(&idxbc.requests)
|
||||
}
|
||||
|
@ -299,15 +314,10 @@ type inmemoryBlockCache struct {
|
|||
}
|
||||
|
||||
type inmemoryBlockCacheKey struct {
|
||||
firstItem string
|
||||
itemsBlockOffset uint64
|
||||
}
|
||||
|
||||
func (ibck *inmemoryBlockCacheKey) Init(bh *blockHeader) {
|
||||
ibck.firstItem = ""
|
||||
if bh.itemsBlockSize == 0 {
|
||||
ibck.firstItem = string(bh.firstItem)
|
||||
}
|
||||
ibck.itemsBlockOffset = bh.itemsBlockOffset
|
||||
}
|
||||
|
||||
|
@ -347,7 +357,7 @@ func (ibc *inmemoryBlockCache) MustClose() {
|
|||
|
||||
// cleaner periodically cleans least recently used items.
|
||||
func (ibc *inmemoryBlockCache) cleaner() {
|
||||
ticker := time.NewTicker(5 * time.Second)
|
||||
ticker := time.NewTicker(30 * time.Second)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
|
@ -363,8 +373,10 @@ func (ibc *inmemoryBlockCache) cleanByTimeout() {
    currentTime := fasttime.UnixTimestamp()
    ibc.mu.Lock()
    for k, ibe := range ibc.m {
        // Delete items accessed more than a minute ago.
        if currentTime-atomic.LoadUint64(&ibe.lastAccessTime) > 60 {
        // Delete items accessed more than two minutes ago.
        if currentTime-atomic.LoadUint64(&ibe.lastAccessTime) > 2*60 {
            // do not call putInmemoryBlock(ibc.m[k]), since it
            // may be used by concurrent goroutines.
            delete(ibc.m, k)
        }
    }
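Both caches in this file follow the same idle-eviction pattern: each entry carries an atomically updated lastAccessTime, a background goroutine ticks every 30 seconds, and entries idle for more than two minutes are dropped. A self-contained sketch of that pattern with hypothetical names and the same 30s/2min constants (the real code uses fasttime.UnixTimestamp() instead of time.Now()):

```go
package main

import (
	"sync"
	"sync/atomic"
	"time"
)

type entry struct {
	lastAccessTime uint64 // unix seconds, updated atomically on every Get
	value          []byte
}

type idleCache struct {
	mu     sync.RWMutex
	m      map[string]*entry
	stopCh chan struct{}
}

func newIdleCache() *idleCache {
	c := &idleCache{m: make(map[string]*entry), stopCh: make(chan struct{})}
	go c.cleaner()
	return c
}

func (c *idleCache) Get(k string) []byte {
	c.mu.RLock()
	e := c.m[k]
	c.mu.RUnlock()
	if e == nil {
		return nil
	}
	atomic.StoreUint64(&e.lastAccessTime, uint64(time.Now().Unix()))
	return e.value
}

// cleaner periodically drops entries that were not touched for more than two minutes.
func (c *idleCache) cleaner() {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-c.stopCh:
			return
		case <-ticker.C:
		}
		now := uint64(time.Now().Unix())
		c.mu.Lock()
		for k, e := range c.m {
			if now-atomic.LoadUint64(&e.lastAccessTime) > 2*60 {
				delete(c.m, k)
			}
		}
		c.mu.Unlock()
	}
}

func main() {
	c := newIdleCache()
	_ = c.Get("missing")
	close(c.stopCh)
}
```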
@ -424,6 +436,16 @@ func (ibc *inmemoryBlockCache) Len() uint64 {
|
|||
return uint64(n)
|
||||
}
|
||||
|
||||
func (ibc *inmemoryBlockCache) SizeBytes() uint64 {
|
||||
n := 0
|
||||
ibc.mu.RLock()
|
||||
for _, e := range ibc.m {
|
||||
n += e.ib.SizeBytes()
|
||||
}
|
||||
ibc.mu.RUnlock()
|
||||
return uint64(n)
|
||||
}
|
||||
|
||||
func (ibc *inmemoryBlockCache) Requests() uint64 {
|
||||
return atomic.LoadUint64(&ibc.requests)
|
||||
}
|
||||
|
|
|
@ -28,8 +28,6 @@ type partSearch struct {
|
|||
// Pointer to inmemory block, which may be reused.
|
||||
inmemoryBlockReuse *inmemoryBlock
|
||||
|
||||
shouldCacheBlock func(item []byte) bool
|
||||
|
||||
idxbCache *indexBlockCache
|
||||
ibCache *inmemoryBlockCache
|
||||
|
||||
|
@ -54,7 +52,6 @@ func (ps *partSearch) reset() {
|
|||
putInmemoryBlock(ps.inmemoryBlockReuse)
|
||||
ps.inmemoryBlockReuse = nil
|
||||
}
|
||||
ps.shouldCacheBlock = nil
|
||||
ps.idxbCache = nil
|
||||
ps.ibCache = nil
|
||||
ps.err = nil
|
||||
|
@ -71,13 +68,12 @@ func (ps *partSearch) reset() {
|
|||
// Init initializes ps for search in the p.
|
||||
//
|
||||
// Use Seek for search in p.
|
||||
func (ps *partSearch) Init(p *part, shouldCacheBlock func(item []byte) bool) {
|
||||
func (ps *partSearch) Init(p *part) {
|
||||
ps.reset()
|
||||
|
||||
ps.p = p
|
||||
ps.idxbCache = p.idxbCache
|
||||
ps.ibCache = p.ibCache
|
||||
ps.shouldCacheBlock = shouldCacheBlock
|
||||
}
|
||||
|
||||
// Seek seeks for the first item greater or equal to k in ps.
|
||||
|
@ -306,16 +302,6 @@ func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) {
|
|||
}
|
||||
|
||||
func (ps *partSearch) getInmemoryBlock(bh *blockHeader) (*inmemoryBlock, bool, error) {
|
||||
if ps.shouldCacheBlock != nil {
|
||||
if !ps.shouldCacheBlock(bh.firstItem) {
|
||||
ib, err := ps.readInmemoryBlock(bh)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
return ib, true, nil
|
||||
}
|
||||
}
|
||||
|
||||
var ibKey inmemoryBlockCacheKey
|
||||
ibKey.Init(bh)
|
||||
ib := ps.ibCache.Get(ibKey)
|
||||
|
|
|
@ -51,7 +51,7 @@ func testPartSearchConcurrent(p *part, items []string) error {
|
|||
func testPartSearchSerial(p *part, items []string) error {
|
||||
var ps partSearch
|
||||
|
||||
ps.Init(p, nil)
|
||||
ps.Init(p)
|
||||
var k []byte
|
||||
|
||||
// Search for the item smaller than the items[0]
|
||||
|
|
|
@ -294,13 +294,15 @@ type TableMetrics struct {
|
|||
ItemsCount uint64
|
||||
SizeBytes uint64
|
||||
|
||||
DataBlocksCacheSize uint64
|
||||
DataBlocksCacheRequests uint64
|
||||
DataBlocksCacheMisses uint64
|
||||
DataBlocksCacheSize uint64
|
||||
DataBlocksCacheSizeBytes uint64
|
||||
DataBlocksCacheRequests uint64
|
||||
DataBlocksCacheMisses uint64
|
||||
|
||||
IndexBlocksCacheSize uint64
|
||||
IndexBlocksCacheRequests uint64
|
||||
IndexBlocksCacheMisses uint64
|
||||
IndexBlocksCacheSize uint64
|
||||
IndexBlocksCacheSizeBytes uint64
|
||||
IndexBlocksCacheRequests uint64
|
||||
IndexBlocksCacheMisses uint64
|
||||
|
||||
PartsRefCount uint64
|
||||
}
|
||||
|
@ -328,10 +330,12 @@ func (tb *Table) UpdateMetrics(m *TableMetrics) {
|
|||
m.SizeBytes += p.size
|
||||
|
||||
m.DataBlocksCacheSize += p.ibCache.Len()
|
||||
m.DataBlocksCacheSizeBytes += p.ibCache.SizeBytes()
|
||||
m.DataBlocksCacheRequests += p.ibCache.Requests()
|
||||
m.DataBlocksCacheMisses += p.ibCache.Misses()
|
||||
|
||||
m.IndexBlocksCacheSize += p.idxbCache.Len()
|
||||
m.IndexBlocksCacheSizeBytes += p.idxbCache.SizeBytes()
|
||||
m.IndexBlocksCacheRequests += p.idxbCache.Requests()
|
||||
m.IndexBlocksCacheMisses += p.idxbCache.Misses()
|
||||
|
||||
|
|
|
@ -58,7 +58,7 @@ func (ts *TableSearch) reset() {
|
|||
// Init initializes ts for searching in the tb.
|
||||
//
|
||||
// MustClose must be called when the ts is no longer needed.
|
||||
func (ts *TableSearch) Init(tb *Table, shouldCacheBlock func(item []byte) bool) {
|
||||
func (ts *TableSearch) Init(tb *Table) {
|
||||
if ts.needClosing {
|
||||
logger.Panicf("BUG: missing MustClose call before the next call to Init")
|
||||
}
|
||||
|
@ -76,7 +76,7 @@ func (ts *TableSearch) Init(tb *Table, shouldCacheBlock func(item []byte) bool)
|
|||
}
|
||||
ts.psPool = ts.psPool[:len(ts.pws)]
|
||||
for i, pw := range ts.pws {
|
||||
ts.psPool[i].Init(pw.p, shouldCacheBlock)
|
||||
ts.psPool[i].Init(pw.p)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -109,7 +109,7 @@ func testTableSearchConcurrent(tb *Table, items []string) error {
|
|||
|
||||
func testTableSearchSerial(tb *Table, items []string) error {
|
||||
var ts TableSearch
|
||||
ts.Init(tb, nil)
|
||||
ts.Init(tb)
|
||||
for _, key := range []string{
|
||||
"",
|
||||
"123",
|
||||
|
|
|
@ -81,7 +81,7 @@ func benchmarkTableSearchKeysExt(b *testing.B, tb *Table, keys [][]byte, stripSu
|
|||
b.SetBytes(int64(searchKeysCount * rowsToScan))
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var ts TableSearch
|
||||
ts.Init(tb, nil)
|
||||
ts.Init(tb)
|
||||
defer ts.MustClose()
|
||||
for pb.Next() {
|
||||
startIdx := rand.Intn(len(keys) - searchKeysCount)
|
||||
|
|
|
@ -176,10 +176,10 @@ func TestTableCreateSnapshotAt(t *testing.T) {
|
|||
defer tb2.MustClose()
|
||||
|
||||
var ts, ts1, ts2 TableSearch
|
||||
ts.Init(tb, nil)
|
||||
ts1.Init(tb1, nil)
|
||||
ts.Init(tb)
|
||||
ts1.Init(tb1)
|
||||
defer ts1.MustClose()
|
||||
ts2.Init(tb2, nil)
|
||||
ts2.Init(tb2)
|
||||
defer ts2.MustClose()
|
||||
for i := 0; i < itemsCount; i++ {
|
||||
key := []byte(fmt.Sprintf("item %d", i))
|
||||
|
|
|
@ -99,6 +99,11 @@ func addServicesLabels(services []service, networksLabels map[string]map[string]
        commonLabels["__meta_dockerswarm_service_label_"+discoveryutils.SanitizeLabelName(k)] = v
    }
    for _, vip := range service.Endpoint.VirtualIPs {
        // Skip services without a virtual address.
        // These are usually host services.
        if vip.Addr == "" {
            continue
        }
        ip, _, err := net.ParseCIDR(vip.Addr)
        if err != nil {
            logger.Errorf("cannot parse: %q as cidr for service label add, err: %v", vip.Addr, err)
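The virtual IP reported by Docker Swarm comes in CIDR form (address plus mask), which is why net.ParseCIDR is used and only the IP part is kept. A tiny illustration with a made-up address:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// A Swarm VIP as returned by the services API: address plus network mask.
	ip, ipNet, err := net.ParseCIDR("10.0.0.3/24")
	if err != nil {
		panic(err)
	}
	fmt.Println(ip)    // 10.0.0.3
	fmt.Println(ipNet) // 10.0.0.0/24
}
```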
@ -269,7 +269,6 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
|
|||
if err != nil {
|
||||
up = 0
|
||||
scrapesFailed.Inc()
|
||||
metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_scrapes_failed_per_url_total{url=%q}`, sw.Config.ScrapeURL)).Inc()
|
||||
} else {
|
||||
bodyString := bytesutil.ToUnsafeString(body.B)
|
||||
wc.rows.UnmarshalWithErrLogger(bodyString, sw.logError)
|
||||
|
@ -281,7 +280,6 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
|
|||
srcRows = srcRows[:0]
|
||||
up = 0
|
||||
scrapesSkippedBySampleLimit.Inc()
|
||||
metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_scrapes_skipped_by_sample_limit_per_url_total{url=%q}`, sw.Config.ScrapeURL)).Inc()
|
||||
}
|
||||
samplesPostRelabeling := 0
|
||||
for i := range srcRows {
|
||||
|
|
|
@ -184,9 +184,11 @@ func (bsw *blockStreamWriter) MustClose() {
|
|||
}
|
||||
|
||||
// WriteExternalBlock writes b to bsw and updates ph and rowsMerged.
|
||||
func (bsw *blockStreamWriter) WriteExternalBlock(b *Block, ph *partHeader, rowsMerged *uint64) {
|
||||
func (bsw *blockStreamWriter) WriteExternalBlock(b *Block, ph *partHeader, rowsMerged *uint64, needDedup bool) {
|
||||
atomic.AddUint64(rowsMerged, uint64(b.rowsCount()))
|
||||
b.deduplicateSamplesDuringMerge()
|
||||
if needDedup {
|
||||
b.deduplicateSamplesDuringMerge()
|
||||
}
|
||||
headerData, timestampsData, valuesData := b.MarshalData(bsw.timestampsBlockOffset, bsw.valuesBlockOffset)
|
||||
usePrevTimestamps := len(bsw.prevTimestampsData) > 0 && bytes.Equal(timestampsData, bsw.prevTimestampsData)
|
||||
if usePrevTimestamps {
|
||||
|
|
|
@ -49,7 +49,7 @@ func benchmarkBlockStreamWriter(b *testing.B, ebs []Block, rowsCount int, writeR
|
|||
|
||||
bsw.InitFromInmemoryPart(&mp)
|
||||
for i := range ebsCopy {
|
||||
bsw.WriteExternalBlock(&ebsCopy[i], &ph, &rowsMerged)
|
||||
bsw.WriteExternalBlock(&ebsCopy[i], &ph, &rowsMerged, false)
|
||||
}
|
||||
bsw.MustClose()
|
||||
mp.Reset()
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"sync"
|
||||
|
@ -48,28 +49,6 @@ const (
|
|||
nsPrefixDateTagToMetricIDs = 6
|
||||
)
|
||||
|
||||
func shouldCacheBlock(item []byte) bool {
|
||||
if len(item) == 0 {
|
||||
return true
|
||||
}
|
||||
// Do not cache items starting from
|
||||
switch item[0] {
|
||||
case nsPrefixTagToMetricIDs, nsPrefixDateTagToMetricIDs:
|
||||
// Do not cache blocks with tag->metricIDs and (date,tag)->metricIDs items, since:
|
||||
// - these blocks are scanned sequentially, so the overhead
|
||||
// on their unmarshaling is amortized by the sequential scan.
|
||||
// - these blocks can occupy high amounts of RAM in cache
|
||||
// and evict other frequently accessed blocks.
|
||||
return false
|
||||
case nsPrefixDeletedMetricID:
|
||||
// Do not cache blocks with deleted metricIDs,
|
||||
// since these metricIDs are loaded only once during app start.
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// indexDB represents an index db.
|
||||
type indexDB struct {
|
||||
// Atomic counters must go at the top of the structure in order to properly align by 8 bytes on 32-bit archs.
|
||||
|
@ -125,9 +104,9 @@ type indexDB struct {
|
|||
// matching low number of metrics.
|
||||
uselessTagFiltersCache *workingsetcache.Cache
|
||||
|
||||
// Cache for (date, tagFilter) -> metricIDsLen, which is used for reducing
|
||||
// Cache for (date, tagFilter) -> filterDuration, which is used for reducing
|
||||
// the amount of work when matching a set of filters.
|
||||
metricIDsPerDateTagFilterCache *workingsetcache.Cache
|
||||
durationsPerDateTagFilterCache *workingsetcache.Cache
|
||||
|
||||
indexSearchPool sync.Pool
|
||||
|
||||
|
@ -139,10 +118,13 @@ type indexDB struct {
|
|||
// metricIDs, since it usually requires 1 bit per deleted metricID.
|
||||
deletedMetricIDs atomic.Value
|
||||
deletedMetricIDsUpdateLock sync.Mutex
|
||||
|
||||
// The minimum timestamp when queries with composite index can be used.
|
||||
minTimestampForCompositeIndex int64
|
||||
}
|
||||
|
||||
// openIndexDB opens index db from the given path with the given caches.
|
||||
func openIndexDB(path string, metricIDCache, metricNameCache, tsidCache *workingsetcache.Cache) (*indexDB, error) {
|
||||
func openIndexDB(path string, metricIDCache, metricNameCache, tsidCache *workingsetcache.Cache, minTimestampForCompositeIndex int64) (*indexDB, error) {
|
||||
if metricIDCache == nil {
|
||||
logger.Panicf("BUG: metricIDCache must be non-nil")
|
||||
}
|
||||
|
@ -173,7 +155,9 @@ func openIndexDB(path string, metricIDCache, metricNameCache, tsidCache *working
|
|||
metricNameCache: metricNameCache,
|
||||
tsidCache: tsidCache,
|
||||
uselessTagFiltersCache: workingsetcache.New(mem/128, time.Hour),
|
||||
metricIDsPerDateTagFilterCache: workingsetcache.New(mem/128, time.Hour),
|
||||
durationsPerDateTagFilterCache: workingsetcache.New(mem/128, time.Hour),
|
||||
|
||||
minTimestampForCompositeIndex: minTimestampForCompositeIndex,
|
||||
}
|
||||
|
||||
is := db.getIndexSearch(noDeadline)
|
||||
|
@ -220,6 +204,8 @@ type IndexDBMetrics struct {
|
|||
IndexBlocksWithMetricIDsProcessed uint64
|
||||
IndexBlocksWithMetricIDsIncorrectOrder uint64
|
||||
|
||||
MinTimestampForCompositeIndex uint64
|
||||
|
||||
mergeset.TableMetrics
|
||||
}
|
||||
|
||||
|
@ -261,6 +247,8 @@ func (db *indexDB) UpdateMetrics(m *IndexDBMetrics) {
|
|||
m.IndexBlocksWithMetricIDsProcessed = atomic.LoadUint64(&indexBlocksWithMetricIDsProcessed)
|
||||
m.IndexBlocksWithMetricIDsIncorrectOrder = atomic.LoadUint64(&indexBlocksWithMetricIDsIncorrectOrder)
|
||||
|
||||
m.MinTimestampForCompositeIndex = uint64(db.minTimestampForCompositeIndex)
|
||||
|
||||
db.tb.UpdateMetrics(&m.TableMetrics)
|
||||
db.doExtDB(func(extDB *indexDB) {
|
||||
extDB.tb.UpdateMetrics(&m.TableMetrics)
|
||||
|
@ -328,14 +316,14 @@ func (db *indexDB) decRef() {
|
|||
// Free space occupied by caches owned by db.
|
||||
db.tagCache.Stop()
|
||||
db.uselessTagFiltersCache.Stop()
|
||||
db.metricIDsPerDateTagFilterCache.Stop()
|
||||
db.durationsPerDateTagFilterCache.Stop()
|
||||
|
||||
db.tagCache = nil
|
||||
db.metricIDCache = nil
|
||||
db.metricNameCache = nil
|
||||
db.tsidCache = nil
|
||||
db.uselessTagFiltersCache = nil
|
||||
db.metricIDsPerDateTagFilterCache = nil
|
||||
db.durationsPerDateTagFilterCache = nil
|
||||
|
||||
if atomic.LoadUint64(&db.mustDrop) == 0 {
|
||||
return
|
||||
|
@ -549,7 +537,7 @@ func (db *indexDB) getIndexSearch(deadline uint64) *indexSearch {
|
|||
}
|
||||
}
|
||||
is := v.(*indexSearch)
|
||||
is.ts.Init(db.tb, shouldCacheBlock)
|
||||
is.ts.Init(db.tb)
|
||||
is.deadline = deadline
|
||||
return is
|
||||
}
|
||||
|
@ -621,51 +609,34 @@ func (db *indexDB) createIndexes(tsid *TSID, mn *MetricName) error {
|
|||
// The order of index items is important.
|
||||
// It guarantees index consistency.
|
||||
|
||||
items := getIndexItems()
|
||||
ii := getIndexItems()
|
||||
defer putIndexItems(ii)
|
||||
|
||||
// Create MetricName -> TSID index.
|
||||
items.B = append(items.B, nsPrefixMetricNameToTSID)
|
||||
items.B = mn.Marshal(items.B)
|
||||
items.B = append(items.B, kvSeparatorChar)
|
||||
items.B = tsid.Marshal(items.B)
|
||||
items.Next()
|
||||
ii.B = append(ii.B, nsPrefixMetricNameToTSID)
|
||||
ii.B = mn.Marshal(ii.B)
|
||||
ii.B = append(ii.B, kvSeparatorChar)
|
||||
ii.B = tsid.Marshal(ii.B)
|
||||
ii.Next()
|
||||
|
||||
// Create MetricID -> MetricName index.
|
||||
items.B = marshalCommonPrefix(items.B, nsPrefixMetricIDToMetricName)
|
||||
items.B = encoding.MarshalUint64(items.B, tsid.MetricID)
|
||||
items.B = mn.Marshal(items.B)
|
||||
items.Next()
|
||||
ii.B = marshalCommonPrefix(ii.B, nsPrefixMetricIDToMetricName)
|
||||
ii.B = encoding.MarshalUint64(ii.B, tsid.MetricID)
|
||||
ii.B = mn.Marshal(ii.B)
|
||||
ii.Next()
|
||||
|
||||
// Create MetricID -> TSID index.
|
||||
items.B = marshalCommonPrefix(items.B, nsPrefixMetricIDToTSID)
|
||||
items.B = encoding.MarshalUint64(items.B, tsid.MetricID)
|
||||
items.B = tsid.Marshal(items.B)
|
||||
items.Next()
|
||||
ii.B = marshalCommonPrefix(ii.B, nsPrefixMetricIDToTSID)
|
||||
ii.B = encoding.MarshalUint64(ii.B, tsid.MetricID)
|
||||
ii.B = tsid.Marshal(ii.B)
|
||||
ii.Next()
|
||||
|
||||
commonPrefix := kbPool.Get()
|
||||
commonPrefix.B = marshalCommonPrefix(commonPrefix.B[:0], nsPrefixTagToMetricIDs)
|
||||
prefix := kbPool.Get()
|
||||
prefix.B = marshalCommonPrefix(prefix.B[:0], nsPrefixTagToMetricIDs)
|
||||
ii.registerTagIndexes(prefix.B, mn, tsid.MetricID)
|
||||
kbPool.Put(prefix)
|
||||
|
||||
// Create MetricGroup -> MetricID index.
|
||||
items.B = append(items.B, commonPrefix.B...)
|
||||
items.B = marshalTagValue(items.B, nil)
|
||||
items.B = marshalTagValue(items.B, mn.MetricGroup)
|
||||
items.B = encoding.MarshalUint64(items.B, tsid.MetricID)
|
||||
items.Next()
|
||||
addReverseMetricGroupIfNeeded(items, commonPrefix.B, mn, tsid.MetricID)
|
||||
|
||||
// For each tag create tag -> MetricID index.
|
||||
for i := range mn.Tags {
|
||||
tag := &mn.Tags[i]
|
||||
items.B = append(items.B, commonPrefix.B...)
|
||||
items.B = tag.Marshal(items.B)
|
||||
items.B = encoding.MarshalUint64(items.B, tsid.MetricID)
|
||||
items.Next()
|
||||
}
|
||||
|
||||
kbPool.Put(commonPrefix)
|
||||
err := db.tb.AddItems(items.Items)
|
||||
putIndexItems(items)
|
||||
return err
|
||||
return db.tb.AddItems(ii.Items)
|
||||
}
|
||||
|
||||
type indexItems struct {
|
||||
|
@ -722,10 +693,6 @@ func (db *indexDB) SearchTagKeysOnTimeRange(tr TimeRange, maxTagKeys int, deadli
|
|||
|
||||
keys := make([]string, 0, len(tks))
|
||||
for key := range tks {
|
||||
if key == string(graphiteReverseTagKey) {
|
||||
// Do not show artificially created graphiteReverseTagKey to the caller.
|
||||
continue
|
||||
}
|
||||
// Do not skip empty keys, since they are converted to __name__
|
||||
keys = append(keys, key)
|
||||
}
|
||||
|
@@ -796,16 +763,20 @@ func (is *indexSearch) searchTagKeysOnDate(tks map[string]struct{}, date uint64,
 		if mp.IsDeletedTag(dmis) {
 			continue
 		}
-
+		key := mp.Tag.Key
+		if isArtificialTagKey(key) {
+			// Skip artificially created tag key.
+			continue
+		}
 		// Store tag key.
-		tks[string(mp.Tag.Key)] = struct{}{}
+		tks[string(key)] = struct{}{}
 
 		// Search for the next tag key.
 		// The last char in kb.B must be tagSeparatorChar.
 		// Just increment it in order to jump to the next tag key.
 		kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixDateTagToMetricIDs)
 		kb.B = encoding.MarshalUint64(kb.B, date)
-		kb.B = marshalTagValue(kb.B, mp.Tag.Key)
+		kb.B = marshalTagValue(kb.B, key)
 		kb.B[len(kb.B)-1]++
 		ts.Seek(kb.B)
 	}
@@ -837,10 +808,6 @@ func (db *indexDB) SearchTagKeys(maxTagKeys int, deadline uint64) ([]string, err
 
 	keys := make([]string, 0, len(tks))
 	for key := range tks {
-		if key == string(graphiteReverseTagKey) {
-			// Do not show artificially created graphiteReverseTagKey to the caller.
-			continue
-		}
 		// Do not skip empty keys, since they are converted to __name__
 		keys = append(keys, key)
 	}
@@ -875,15 +842,19 @@ func (is *indexSearch) searchTagKeys(tks map[string]struct{}, maxTagKeys int) er
 		if mp.IsDeletedTag(dmis) {
 			continue
 		}
-
+		key := mp.Tag.Key
+		if isArtificialTagKey(key) {
+			// Skip artificailly created tag keys.
+			continue
+		}
 		// Store tag key.
-		tks[string(mp.Tag.Key)] = struct{}{}
+		tks[string(key)] = struct{}{}
 
 		// Search for the next tag key.
 		// The last char in kb.B must be tagSeparatorChar.
 		// Just increment it in order to jump to the next tag key.
 		kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixTagToMetricIDs)
-		kb.B = marshalTagValue(kb.B, mp.Tag.Key)
+		kb.B = marshalTagValue(kb.B, key)
 		kb.B[len(kb.B)-1]++
 		ts.Seek(kb.B)
 	}
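The tag-key scans above rely on a common inverted-index trick that the comments describe: after handling one tag key, the search key is rebuilt up to the trailing `tagSeparatorChar` and its last byte is incremented, so the subsequent `ts.Seek` jumps straight past all remaining entries of that key. A hedged, self-contained illustration of the same prefix-increment idea over a sorted string slice follows; `sort.SearchStrings` stands in for the mergeset `Seek`, and the entry layout is invented for the demo.

```go
package main

import (
	"fmt"
	"sort"
)

// nextPrefix returns a key that sorts after every string starting with prefix,
// by incrementing the prefix's last byte (assumes that byte is not 0xff).
func nextPrefix(prefix string) string {
	b := []byte(prefix)
	b[len(b)-1]++
	return string(b)
}

func main() {
	// Sorted index entries in the form "<tagKey>\x01<metricID>".
	entries := []string{
		"host\x01m1", "host\x01m2", "host\x01m3",
		"job\x01m1", "job\x01m9",
		"zone\x01m4",
	}

	// Walk distinct tag keys by seeking past each key's block of entries.
	i := 0
	for i < len(entries) {
		key := entries[i][:len(entries[i])-3] // strip the "\x01mX" suffix for the demo
		fmt.Println("tag key:", key)

		// Jump to the first entry of the next tag key.
		seekKey := nextPrefix(key + "\x01")
		i = sort.SearchStrings(entries, seekKey)
	}
}
```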
@@ -1374,6 +1345,10 @@ func (is *indexSearch) getTSDBStatusForDate(date uint64, topN int) (*TSDBStatus,
 		if err != nil {
 			return nil, fmt.Errorf("cannot unmarshal tag key from line %q: %w", item, err)
 		}
+		if isArtificialTagKey(tmp) {
+			// Skip artificially created tag keys.
+			continue
+		}
 		if len(tmp) == 0 {
 			tmp = append(tmp, "__name__"...)
 		}
@@ -1663,6 +1638,9 @@ func (db *indexDB) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int,
 	if len(tfss) == 0 {
 		return nil, nil
 	}
+	if tr.MinTimestamp >= db.minTimestampForCompositeIndex {
+		tfss = convertToCompositeTagFilterss(tfss)
+	}
 
 	tfKeyBuf := tagFiltersKeyBufPool.Get()
 	defer tagFiltersKeyBufPool.Put(tfKeyBuf)
@@ -1920,6 +1898,10 @@ func (is *indexSearch) updateMetricIDsByMetricNameMatch(metricIDs, srcMetricIDs
 	// sort srcMetricIDs in order to speed up Seek below.
 	sortedMetricIDs := srcMetricIDs.AppendTo(nil)
 
+	kb := &is.kb
+	kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixTagToMetricIDs)
+	tfs = removeCompositeTagFilters(tfs, kb.B)
+
 	metricName := kbPool.Get()
 	defer kbPool.Put(metricName)
 	mn := GetMetricName()
@@ -2060,7 +2042,7 @@ func (is *indexSearch) getTagFilterWithMinMetricIDsCount(tfs *TagFilters, maxMet
 			continue
 		}
 
-		metricIDs, err := is.getMetricIDsForTagFilter(tf, nil, maxMetrics)
+		metricIDs, err := is.getMetricIDsForTagFilter(tf, maxMetrics)
 		if err != nil {
 			if err == errFallbackToMetricNameMatch {
 				// Skip tag filters requiring to scan for too many metrics.
@@ -2121,16 +2103,91 @@ func (is *indexSearch) getTagFilterWithMinMetricIDsCount(tfs *TagFilters, maxMet
 	return nil, metricIDs, nil
 }
 
+func removeCompositeTagFilters(tfs []*tagFilter, prefix []byte) []*tagFilter {
+	if !hasCompositeTagFilters(tfs, prefix) {
+		return tfs
+	}
+	var tagKey []byte
+	var name []byte
+	tfsNew := make([]*tagFilter, 0, len(tfs)+1)
+	for _, tf := range tfs {
+		if !bytes.HasPrefix(tf.prefix, prefix) {
+			tfsNew = append(tfsNew, tf)
+			continue
+		}
+		suffix := tf.prefix[len(prefix):]
+		var err error
+		_, tagKey, err = unmarshalTagValue(tagKey[:0], suffix)
+		if err != nil {
+			logger.Panicf("BUG: cannot unmarshal tag key from suffix=%q: %s", suffix, err)
+		}
+		if len(tagKey) == 0 || tagKey[0] != compositeTagKeyPrefix {
+			tfsNew = append(tfsNew, tf)
+			continue
+		}
+		tagKey = tagKey[1:]
+		var nameLen uint64
+		tagKey, nameLen, err = encoding.UnmarshalVarUint64(tagKey)
+		if err != nil {
+			logger.Panicf("BUG: cannot unmarshal nameLen from tagKey %q: %s", tagKey, err)
+		}
+		if nameLen == 0 {
+			logger.Panicf("BUG: nameLen must be greater than 0")
+		}
+		if uint64(len(tagKey)) < nameLen {
+			logger.Panicf("BUG: expecting at %d bytes for name in tagKey=%q; got %d bytes", nameLen, tagKey, len(tagKey))
+		}
+		name = append(name[:0], tagKey[:nameLen]...)
+		tagKey = tagKey[nameLen:]
+		var tfNew tagFilter
+		if err := tfNew.Init(prefix, tagKey, tf.value, tf.isNegative, tf.isRegexp); err != nil {
+			logger.Panicf("BUG: cannot initialize {%s=%q} filter: %s", tagKey, tf.value, err)
+		}
+		tfsNew = append(tfsNew, &tfNew)
+	}
+	if len(name) > 0 {
+		var tfNew tagFilter
+		if err := tfNew.Init(prefix, nil, name, false, false); err != nil {
+			logger.Panicf("BUG: unexpected error when initializing {__name__=%q} filter: %s", name, err)
+		}
+		tfsNew = append(tfsNew, &tfNew)
+	}
+	return tfsNew
+}
+
+func hasCompositeTagFilters(tfs []*tagFilter, prefix []byte) bool {
+	var tagKey []byte
+	for _, tf := range tfs {
+		if !bytes.HasPrefix(tf.prefix, prefix) {
+			continue
+		}
+		suffix := tf.prefix[len(prefix):]
+		var err error
+		_, tagKey, err = unmarshalTagValue(tagKey[:0], suffix)
+		if err != nil {
+			logger.Panicf("BUG: cannot unmarshal tag key from suffix=%q: %s", suffix, err)
+		}
+		if len(tagKey) > 0 && tagKey[0] == compositeTagKeyPrefix {
+			return true
+		}
+	}
+	return false
+}
+
 func matchTagFilters(mn *MetricName, tfs []*tagFilter, kb *bytesutil.ByteBuffer) (bool, error) {
 	kb.B = marshalCommonPrefix(kb.B[:0], nsPrefixTagToMetricIDs)
 
 	for i, tf := range tfs {
+		if bytes.Equal(tf.key, graphiteReverseTagKey) {
+			// Skip artificial tag filter for Graphite-like metric names with dots,
+			// since mn doesn't contain the corresponding tag.
+			continue
+		}
 		if len(tf.key) == 0 || string(tf.key) == "__graphite__" {
 			// Match against mn.MetricGroup.
 			b := marshalTagValue(kb.B, nil)
 			b = marshalTagValue(b, mn.MetricGroup)
 			kb.B = b[:len(kb.B)]
-			ok, err := matchTagFilter(b, tf)
+			ok, err := tf.match(b)
 			if err != nil {
 				return false, fmt.Errorf("cannot match MetricGroup %q with tagFilter %s: %w", mn.MetricGroup, tf, err)
 			}
@@ -2144,17 +2201,10 @@ func matchTagFilters(mn *MetricName, tfs []*tagFilter, kb *bytesutil.ByteBuffer)
 			}
 			continue
 		}
-		if bytes.Equal(tf.key, graphiteReverseTagKey) {
-			// Skip artificial tag filter for Graphite-like metric names with dots,
-			// since mn doesn't contain the corresponding tag.
-			continue
-		}
-
 		// Search for matching tag name.
 		tagMatched := false
 		tagSeen := false
-		for j := range mn.Tags {
-			tag := &mn.Tags[j]
+		for _, tag := range mn.Tags {
 			if string(tag.Key) != string(tf.key) {
 				continue
 			}
@@ -2163,7 +2213,7 @@ func matchTagFilters(mn *MetricName, tfs []*tagFilter, kb *bytesutil.ByteBuffer)
 			tagSeen = true
 			b := tag.Marshal(kb.B)
 			kb.B = b[:len(kb.B)]
-			ok, err := matchTagFilter(b, tf)
+			ok, err := tf.match(b)
 			if err != nil {
 				return false, fmt.Errorf("cannot match tag %q with tagFilter %s: %w", tag, tf, err)
 			}
@@ -2203,20 +2253,6 @@ func matchTagFilters(mn *MetricName, tfs []*tagFilter, kb *bytesutil.ByteBuffer)
 	return true, nil
 }
 
-func matchTagFilter(b []byte, tf *tagFilter) (bool, error) {
-	if !bytes.HasPrefix(b, tf.prefix) {
-		return tf.isNegative, nil
-	}
-	ok, err := tf.matchSuffix(b[len(tf.prefix):])
-	if err != nil {
-		return false, err
-	}
-	if !ok {
-		return tf.isNegative, nil
-	}
-	return !tf.isNegative, nil
-}
-
 func (is *indexSearch) searchMetricIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int) ([]uint64, error) {
 	metricIDs := &uint64set.Set{}
 	for _, tfs := range tfss {
@@ -2328,7 +2364,7 @@ const (
 
 var uselessTagFilterCacheValue = []byte("1")
 
-func (is *indexSearch) getMetricIDsForTagFilter(tf *tagFilter, filter *uint64set.Set, maxMetrics int) (*uint64set.Set, error) {
+func (is *indexSearch) getMetricIDsForTagFilter(tf *tagFilter, maxMetrics int) (*uint64set.Set, error) {
 	if tf.isNegative {
 		logger.Panicf("BUG: isNegative must be false")
 	}
@@ -2346,7 +2382,7 @@ func (is *indexSearch) getMetricIDsForTagFilter(tf *tagFilter, filter *uint64set
 
 	// Slow path - scan for all the rows with the given prefix.
 	maxLoops := maxMetrics * maxIndexScanSlowLoopsPerMetric
-	if err := is.getMetricIDsForTagFilterSlow(tf, filter, maxLoops, metricIDs.Add); err != nil {
+	if err := is.getMetricIDsForTagFilterSlow(tf, nil, maxLoops, metricIDs.Add); err != nil {
 		if err == errFallbackToMetricNameMatch {
 			return nil, err
 		}
@@ -2743,9 +2779,8 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
 	// Sort tfs by the number of matching filters from previous queries.
 	// This way we limit the amount of work below by applying more specific filters at first.
 	type tagFilterWithCount struct {
-		tf    *tagFilter
-		cost  uint64
-		count uint64
+		tf      *tagFilter
+		seconds float64
 	}
 	tfsWithCount := make([]tagFilterWithCount, len(tfs.tfs))
 	kb := &is.kb
@@ -2753,26 +2788,26 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
 	for i := range tfs.tfs {
 		tf := &tfs.tfs[i]
 		kb.B = appendDateTagFilterCacheKey(kb.B[:0], date, tf)
-		buf = is.db.metricIDsPerDateTagFilterCache.Get(buf[:0], kb.B)
-		count := uint64(0)
+		buf = is.db.durationsPerDateTagFilterCache.Get(buf[:0], kb.B)
+		seconds := float64(0)
 		if len(buf) == 8 {
-			count = encoding.UnmarshalUint64(buf)
+			n := encoding.UnmarshalUint64(buf)
+			seconds = math.Float64frombits(n)
 		}
 		tfsWithCount[i] = tagFilterWithCount{
-			tf:    tf,
-			cost:  count * tf.matchCost,
-			count: count,
+			tf:      tf,
+			seconds: seconds,
 		}
 	}
 	sort.Slice(tfsWithCount, func(i, j int) bool {
 		a, b := &tfsWithCount[i], &tfsWithCount[j]
-		if a.cost != b.cost {
-			return a.cost < b.cost
+		if a.seconds != b.seconds {
+			return a.seconds < b.seconds
 		}
-		return a.tf.Less(b.tf)
+		return a.tf.matchCost < b.tf.matchCost
 	})
 
-	// Populate metricIDs with the first non-negative filter.
+	// Populate metricIDs for the first non-negative filter.
 	var tfsPostponed []*tagFilter
 	var metricIDs *uint64set.Set
 	maxDateMetrics := maxMetrics * 50
@@ -2783,7 +2818,7 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
 			tfsRemainingWithCount = append(tfsRemainingWithCount, tfsWithCount[i])
 			continue
 		}
-		m, err := is.getMetricIDsForDateTagFilter(tf, date, tfs.commonPrefix, nil, maxDateMetrics)
+		m, err := is.getMetricIDsForDateTagFilter(tf, date, tfs.commonPrefix, maxDateMetrics)
 		if err != nil {
 			return nil, err
 		}
@@ -2818,25 +2853,28 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
 		}
 		metricIDs = m
 	}
-	if metricIDs.Len() == 0 {
-		// There is no sense in inspecting tfsRemainingWithCount, since the result will be empty.
-		return nil, nil
-	}
 
 	// Intersect metricIDs with the rest of filters.
+	//
+	// Do not run these tag filters in parallel, since this may result in CPU and RAM waste
+	// when the intial tag filters significantly reduce the number of found metricIDs,
+	// so the remaining filters could be performed via much faster metricName matching instead
+	// of slow selecting of matching metricIDs.
 	for i := range tfsRemainingWithCount {
 		tfWithCount := tfsRemainingWithCount[i]
-		if n := uint64(metricIDs.Len()); n < 1000 || (n < tfWithCount.count/maxIndexScanLoopsPerMetric && n < uint64(maxMetrics)/10) {
-			// It should be faster performing metricName match on the remaining filters
-			// instead of scanning big number of entries in the inverted index for these filters.
-			for i < len(tfsRemainingWithCount) {
-				tfsPostponed = append(tfsPostponed, tfsRemainingWithCount[i].tf)
-				i++
-			}
+		tf := tfWithCount.tf
+		metricIDsLen := metricIDs.Len()
+		if metricIDsLen == 0 {
+			// Short circuit - there is no need in applying the remaining filters to an empty set.
 			break
 		}
-		tf := tfWithCount.tf
-		m, err := is.getMetricIDsForDateTagFilter(tf, date, tfs.commonPrefix, metricIDs, maxDateMetrics)
+		if float64(metricIDsLen)/metricNameMatchesPerSecond < tfWithCount.seconds {
+			// It should be faster performing metricName match on the remaining filters
+			// instead of scanning big number of entries in the inverted index for these filters.
+			tfsPostponed = append(tfsPostponed, tf)
+			continue
+		}
+		m, err := is.getMetricIDsForDateTagFilter(tf, date, tfs.commonPrefix, maxDateMetrics)
 		if err != nil {
 			return nil, err
 		}
@@ -2850,10 +2888,10 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
 		} else {
 			metricIDs.Intersect(m)
 		}
-		if metricIDs.Len() == 0 {
-			// Short circuit - there is no need in applying the remaining filters to empty set.
-			return nil, nil
-		}
 	}
+	if metricIDs.Len() == 0 {
+		// There is no need in applying tfsPostponed, since the result is empty.
+		return nil, nil
+	}
 	if len(tfsPostponed) > 0 {
 		if n := metricIDs.Len(); n > 50000 && n > maxMetrics/10 {
@@ -2871,14 +2909,20 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
 	return metricIDs, nil
 }
 
-func (is *indexSearch) storeDateMetricID(date, metricID uint64) error {
-	items := getIndexItems()
-	defer putIndexItems(items)
+// The estimated number of per-second loops inside updateMetricIDsByMetricNameMatch
+//
+// This value is used for determining when matching by metric name must be perfromed instead of matching
+// by the remaining tag filters.
+const metricNameMatchesPerSecond = 10000
 
-	items.B = is.marshalCommonPrefix(items.B, nsPrefixDateToMetricID)
-	items.B = encoding.MarshalUint64(items.B, date)
-	items.B = encoding.MarshalUint64(items.B, metricID)
-	items.Next()
+func (is *indexSearch) storeDateMetricID(date, metricID uint64) error {
+	ii := getIndexItems()
+	defer putIndexItems(ii)
+
+	ii.B = is.marshalCommonPrefix(ii.B, nsPrefixDateToMetricID)
+	ii.B = encoding.MarshalUint64(ii.B, date)
+	ii.B = encoding.MarshalUint64(ii.B, metricID)
+	ii.Next()
 
 	// Create per-day inverted index entries for metricID.
 	kb := kbPool.Get()
@@ -2906,27 +2950,44 @@ func (is *indexSearch) storeDateMetricID(date, metricID uint64) error {
 	}
 	kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixDateTagToMetricIDs)
 	kb.B = encoding.MarshalUint64(kb.B, date)
 
-	items.B = append(items.B, kb.B...)
-	items.B = marshalTagValue(items.B, nil)
-	items.B = marshalTagValue(items.B, mn.MetricGroup)
-	items.B = encoding.MarshalUint64(items.B, metricID)
-	items.Next()
-	addReverseMetricGroupIfNeeded(items, kb.B, mn, metricID)
-	for i := range mn.Tags {
-		tag := &mn.Tags[i]
-		items.B = append(items.B, kb.B...)
-		items.B = tag.Marshal(items.B)
-		items.B = encoding.MarshalUint64(items.B, metricID)
-		items.Next()
-	}
-	if err = is.db.tb.AddItems(items.Items); err != nil {
+	ii.registerTagIndexes(kb.B, mn, metricID)
+	if err = is.db.tb.AddItems(ii.Items); err != nil {
 		return fmt.Errorf("cannot add per-day entires for metricID %d: %w", metricID, err)
 	}
 	return nil
 }
 
-func addReverseMetricGroupIfNeeded(items *indexItems, prefix []byte, mn *MetricName, metricID uint64) {
+func (ii *indexItems) registerTagIndexes(prefix []byte, mn *MetricName, metricID uint64) {
+	// Add index entry for MetricGroup -> MetricID
+	ii.B = append(ii.B, prefix...)
+	ii.B = marshalTagValue(ii.B, nil)
+	ii.B = marshalTagValue(ii.B, mn.MetricGroup)
+	ii.B = encoding.MarshalUint64(ii.B, metricID)
+	ii.Next()
+	ii.addReverseMetricGroupIfNeeded(prefix, mn, metricID)
+
+	// Add index entries for tags: tag -> MetricID
+	for _, tag := range mn.Tags {
+		ii.B = append(ii.B, prefix...)
+		ii.B = tag.Marshal(ii.B)
+		ii.B = encoding.MarshalUint64(ii.B, metricID)
+		ii.Next()
+	}
+
+	// Add index entries for composite tags: MetricGroup+tag -> MetricID
+	compositeKey := kbPool.Get()
+	for _, tag := range mn.Tags {
+		compositeKey.B = marshalCompositeTagKey(compositeKey.B[:0], mn.MetricGroup, tag.Key)
+		ii.B = append(ii.B, prefix...)
+		ii.B = marshalTagValue(ii.B, compositeKey.B)
+		ii.B = marshalTagValue(ii.B, tag.Value)
+		ii.B = encoding.MarshalUint64(ii.B, metricID)
+		ii.Next()
+	}
+	kbPool.Put(compositeKey)
+}
+
+func (ii *indexItems) addReverseMetricGroupIfNeeded(prefix []byte, mn *MetricName, metricID uint64) {
 	if bytes.IndexByte(mn.MetricGroup, '.') < 0 {
 		// The reverse metric group is needed only for Graphite-like metrics with points.
 		return
@@ -2934,14 +2995,24 @@ func addReverseMetricGroupIfNeeded(items *indexItems, prefix []byte, mn *MetricN
 	// This is most likely a Graphite metric like 'foo.bar.baz'.
 	// Store reverse metric name 'zab.rab.oof' in order to speed up search for '*.bar.baz'
 	// when the Graphite wildcard has a suffix matching small number of time series.
-	items.B = append(items.B, prefix...)
-	items.B = marshalTagValue(items.B, graphiteReverseTagKey)
+	ii.B = append(ii.B, prefix...)
+	ii.B = marshalTagValue(ii.B, graphiteReverseTagKey)
 	revBuf := kbPool.Get()
 	revBuf.B = reverseBytes(revBuf.B[:0], mn.MetricGroup)
-	items.B = marshalTagValue(items.B, revBuf.B)
+	ii.B = marshalTagValue(ii.B, revBuf.B)
 	kbPool.Put(revBuf)
-	items.B = encoding.MarshalUint64(items.B, metricID)
-	items.Next()
+	ii.B = encoding.MarshalUint64(ii.B, metricID)
+	ii.Next()
 }
 
+func isArtificialTagKey(key []byte) bool {
+	if bytes.Equal(key, graphiteReverseTagKey) {
+		return true
+	}
+	if len(key) > 0 && key[0] == compositeTagKeyPrefix {
+		return true
+	}
+	return false
+}
+
 // The tag key for reverse metric name used for speeding up searching
@@ -2951,6 +3022,20 @@ func addReverseMetricGroupIfNeeded(items *indexItems, prefix []byte, mn *MetricN
 // It is expected that the given key isn't be used by users.
 var graphiteReverseTagKey = []byte("\xff")
 
+// The prefix for composite tag, which is used for speeding up searching
+// for composite filters, which contain `{__name__="<metric_name>"}` filter.
+//
+// It is expected that the given prefix isn't used by users.
+const compositeTagKeyPrefix = '\xfe'
+
+func marshalCompositeTagKey(dst, name, key []byte) []byte {
+	dst = append(dst, compositeTagKeyPrefix)
+	dst = encoding.MarshalVarUint64(dst, uint64(len(name)))
+	dst = append(dst, name...)
+	dst = append(dst, key...)
+	return dst
+}
+
 func reverseBytes(dst, src []byte) []byte {
 	for i := len(src) - 1; i >= 0; i-- {
 		dst = append(dst, src[i])
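marshalCompositeTagKey above prepends the `0xfe` marker and a varint-encoded name length before the metric name and the tag key, which is why the tests later in this diff expect composite keys such as `"\xfe\x03barfoo"` for name `bar` and key `foo`. Here is a standalone sketch of the same byte layout together with the reverse operation; `encoding/binary` stands in for the repository's `lib/encoding` helpers, so treat it as an illustration rather than the library code itself.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const compositeTagKeyPrefix = '\xfe'

// marshalCompositeTagKey encodes a composite tag key as
// 0xfe | uvarint(len(name)) | name | key.
func marshalCompositeTagKey(dst, name, key []byte) []byte {
	dst = append(dst, compositeTagKeyPrefix)
	dst = binary.AppendUvarint(dst, uint64(len(name)))
	dst = append(dst, name...)
	dst = append(dst, key...)
	return dst
}

// unmarshalCompositeTagKey reverses the encoding, which is what a filter
// conversion back to a plain (name, key) pair needs to do.
func unmarshalCompositeTagKey(src []byte) (name, key []byte, err error) {
	if len(src) == 0 || src[0] != compositeTagKeyPrefix {
		return nil, nil, fmt.Errorf("missing composite prefix")
	}
	src = src[1:]
	nameLen, n := binary.Uvarint(src)
	if n <= 0 || uint64(len(src)-n) < nameLen {
		return nil, nil, fmt.Errorf("cannot unmarshal name length")
	}
	src = src[n:]
	return src[:nameLen], src[nameLen:], nil
}

func main() {
	k := marshalCompositeTagKey(nil, []byte("bar"), []byte("foo"))
	fmt.Printf("%q\n", k) // "\xfe\x03barfoo"

	name, key, err := unmarshalCompositeTagKey(k)
	fmt.Printf("name=%q key=%q err=%v\n", name, key, err)
}
```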
@@ -2976,7 +3061,7 @@ func (is *indexSearch) hasDateMetricID(date, metricID uint64) (bool, error) {
 	return true, nil
 }
 
-func (is *indexSearch) getMetricIDsForDateTagFilter(tf *tagFilter, date uint64, commonPrefix []byte, filter *uint64set.Set, maxMetrics int) (*uint64set.Set, error) {
+func (is *indexSearch) getMetricIDsForDateTagFilter(tf *tagFilter, date uint64, commonPrefix []byte, maxMetrics int) (*uint64set.Set, error) {
 	// Augument tag filter prefix for per-date search instead of global search.
 	if !bytes.HasPrefix(tf.prefix, commonPrefix) {
 		logger.Panicf("BUG: unexpected tf.prefix %q; must start with commonPrefix %q", tf.prefix, commonPrefix)
@@ -2990,23 +3075,21 @@ func (is *indexSearch) getMetricIDsForDateTagFilter(tf *tagFilter, date uint64,
 	tfNew := *tf
 	tfNew.isNegative = false // isNegative for the original tf is handled by the caller.
 	tfNew.prefix = kb.B
-	metricIDs, err := is.getMetricIDsForTagFilter(&tfNew, filter, maxMetrics)
-	if filter != nil {
-		// Do not cache the number of matching metricIDs,
-		// since this number may be modified by filter.
-		return metricIDs, err
-	}
-	// Store the number of matching metricIDs in the cache in order to sort tag filters
-	// in ascending number of matching metricIDs on the next search.
+	startTime := time.Now()
+	metricIDs, err := is.getMetricIDsForTagFilter(&tfNew, maxMetrics)
+	duration := time.Since(startTime)
+	// Store the duration for tag filter execution in the cache in order to sort tag filters
+	// in ascending durations on the next search.
 	is.kb.B = appendDateTagFilterCacheKey(is.kb.B[:0], date, tf)
-	metricIDsLen := uint64(metricIDs.Len())
 	if err != nil {
-		// Set metricIDsLen to maxMetrics, so the given entry will be moved to the end
+		// Set duration to big value, so the given tag filter will be moved to the end
 		// of tag filters on the next search.
-		metricIDsLen = uint64(maxMetrics)
+		duration = time.Hour
 	}
-	kb.B = encoding.MarshalUint64(kb.B[:0], metricIDsLen)
-	is.db.metricIDsPerDateTagFilterCache.Set(is.kb.B, kb.B)
+	seconds := duration.Seconds()
+	n := math.Float64bits(seconds)
+	kb.B = encoding.MarshalUint64(kb.B[:0], n)
+	is.db.durationsPerDateTagFilterCache.Set(is.kb.B, kb.B)
 	return metricIDs, err
 }
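The rewritten getMetricIDsForDateTagFilter caches how long each per-date tag filter took rather than how many metricIDs it matched, so the next search can order filters by measured cost. The float64 seconds value is round-tripped through math.Float64bits so it fits the cache's fixed 8-byte values. Below is a minimal hedged sketch of that round trip; a plain map stands in for the workingsetcache used in the diff, and the cache key format is invented for the example.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math"
	"time"
)

// durationsCache mimics a byte-value cache keyed by (date, tag filter).
type durationsCache map[string][]byte

func (c durationsCache) storeSeconds(key string, d time.Duration) {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, math.Float64bits(d.Seconds()))
	c[key] = buf
}

func (c durationsCache) loadSeconds(key string) float64 {
	buf := c[key]
	if len(buf) != 8 {
		// Unknown filters sort first, so they get measured on the next search.
		return 0
	}
	return math.Float64frombits(binary.BigEndian.Uint64(buf))
}

func main() {
	c := durationsCache{}
	c.storeSeconds(`date=18900|{job="api"}`, 42*time.Millisecond)

	fmt.Printf("cached seconds: %.3f\n", c.loadSeconds(`date=18900|{job="api"}`))
	fmt.Printf("missing entry:  %.3f\n", c.loadSeconds(`date=18900|{job="web"}`))
}
```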
|
|
|
@ -456,7 +456,7 @@ func TestIndexDBOpenClose(t *testing.T) {
|
|||
defer tsidCache.Stop()
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
db, err := openIndexDB("test-index-db", metricIDCache, metricNameCache, tsidCache)
|
||||
db, err := openIndexDB("test-index-db", metricIDCache, metricNameCache, tsidCache, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot open indexDB: %s", err)
|
||||
}
|
||||
|
@ -479,7 +479,7 @@ func TestIndexDB(t *testing.T) {
|
|||
defer tsidCache.Stop()
|
||||
|
||||
dbName := "test-index-db-serial"
|
||||
db, err := openIndexDB(dbName, metricIDCache, metricNameCache, tsidCache)
|
||||
db, err := openIndexDB(dbName, metricIDCache, metricNameCache, tsidCache, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot open indexDB: %s", err)
|
||||
}
|
||||
|
@ -509,7 +509,7 @@ func TestIndexDB(t *testing.T) {
|
|||
|
||||
// Re-open the db and verify it works as expected.
|
||||
db.MustClose()
|
||||
db, err = openIndexDB(dbName, metricIDCache, metricNameCache, tsidCache)
|
||||
db, err = openIndexDB(dbName, metricIDCache, metricNameCache, tsidCache, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot open indexDB: %s", err)
|
||||
}
|
||||
|
@ -533,7 +533,7 @@ func TestIndexDB(t *testing.T) {
|
|||
defer tsidCache.Stop()
|
||||
|
||||
dbName := "test-index-db-concurrent"
|
||||
db, err := openIndexDB(dbName, metricIDCache, metricNameCache, tsidCache)
|
||||
db, err := openIndexDB(dbName, metricIDCache, metricNameCache, tsidCache, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot open indexDB: %s", err)
|
||||
}
|
||||
|
@ -1465,7 +1465,7 @@ func TestSearchTSIDWithTimeRange(t *testing.T) {
|
|||
defer tsidCache.Stop()
|
||||
|
||||
dbName := "test-index-db-ts-range"
|
||||
db, err := openIndexDB(dbName, metricIDCache, metricNameCache, tsidCache)
|
||||
db, err := openIndexDB(dbName, metricIDCache, metricNameCache, tsidCache, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot open indexDB: %s", err)
|
||||
}
|
||||
|
|
|
@ -50,7 +50,7 @@ func BenchmarkIndexDBAddTSIDs(b *testing.B) {
|
|||
defer tsidCache.Stop()
|
||||
|
||||
const dbName = "bench-index-db-add-tsids"
|
||||
db, err := openIndexDB(dbName, metricIDCache, metricNameCache, tsidCache)
|
||||
db, err := openIndexDB(dbName, metricIDCache, metricNameCache, tsidCache, 0)
|
||||
if err != nil {
|
||||
b.Fatalf("cannot open indexDB: %s", err)
|
||||
}
|
||||
|
@ -115,7 +115,7 @@ func BenchmarkHeadPostingForMatchers(b *testing.B) {
|
|||
defer tsidCache.Stop()
|
||||
|
||||
const dbName = "bench-head-posting-for-matchers"
|
||||
db, err := openIndexDB(dbName, metricIDCache, metricNameCache, tsidCache)
|
||||
db, err := openIndexDB(dbName, metricIDCache, metricNameCache, tsidCache, 0)
|
||||
if err != nil {
|
||||
b.Fatalf("cannot open indexDB: %s", err)
|
||||
}
|
||||
|
@ -294,7 +294,7 @@ func BenchmarkIndexDBGetTSIDs(b *testing.B) {
|
|||
defer tsidCache.Stop()
|
||||
|
||||
const dbName = "bench-index-db-get-tsids"
|
||||
db, err := openIndexDB(dbName, metricIDCache, metricNameCache, tsidCache)
|
||||
db, err := openIndexDB(dbName, metricIDCache, metricNameCache, tsidCache, 0)
|
||||
if err != nil {
|
||||
b.Fatalf("cannot open indexDB: %s", err)
|
||||
}
|
||||
|
|
|
@ -76,14 +76,14 @@ func mergeBlockStreamsInternal(ph *partHeader, bsw *blockStreamWriter, bsm *bloc
|
|||
if bsm.Block.bh.TSID.Less(&pendingBlock.bh.TSID) {
|
||||
logger.Panicf("BUG: the next TSID=%+v is smaller than the current TSID=%+v", &bsm.Block.bh.TSID, &pendingBlock.bh.TSID)
|
||||
}
|
||||
bsw.WriteExternalBlock(pendingBlock, ph, rowsMerged)
|
||||
bsw.WriteExternalBlock(pendingBlock, ph, rowsMerged, true)
|
||||
pendingBlock.CopyFrom(bsm.Block)
|
||||
continue
|
||||
}
|
||||
if pendingBlock.tooBig() && pendingBlock.bh.MaxTimestamp <= bsm.Block.bh.MinTimestamp {
|
||||
// Fast path - pendingBlock is too big and it doesn't overlap with bsm.Block.
|
||||
// Write the pendingBlock and then deal with bsm.Block.
|
||||
bsw.WriteExternalBlock(pendingBlock, ph, rowsMerged)
|
||||
bsw.WriteExternalBlock(pendingBlock, ph, rowsMerged, true)
|
||||
pendingBlock.CopyFrom(bsm.Block)
|
||||
continue
|
||||
}
|
||||
|
@ -119,13 +119,13 @@ func mergeBlockStreamsInternal(ph *partHeader, bsw *blockStreamWriter, bsm *bloc
|
|||
tmpBlock.timestamps = tmpBlock.timestamps[:maxRowsPerBlock]
|
||||
tmpBlock.values = tmpBlock.values[:maxRowsPerBlock]
|
||||
tmpBlock.fixupTimestamps()
|
||||
bsw.WriteExternalBlock(tmpBlock, ph, rowsMerged)
|
||||
bsw.WriteExternalBlock(tmpBlock, ph, rowsMerged, true)
|
||||
}
|
||||
if err := bsm.Error(); err != nil {
|
||||
return fmt.Errorf("cannot read block to be merged: %w", err)
|
||||
}
|
||||
if !pendingBlockIsEmpty {
|
||||
bsw.WriteExternalBlock(pendingBlock, ph, rowsMerged)
|
||||
bsw.WriteExternalBlock(pendingBlock, ph, rowsMerged, true)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/filestream"
|
||||
|
@ -140,6 +141,10 @@ type indexBlock struct {
|
|||
bhs []blockHeader
|
||||
}
|
||||
|
||||
func (idxb *indexBlock) SizeBytes() int {
|
||||
return cap(idxb.bhs) * int(unsafe.Sizeof(blockHeader{}))
|
||||
}
|
||||
|
||||
func getIndexBlock() *indexBlock {
|
||||
v := indexBlockPool.Get()
|
||||
if v == nil {
|
||||
|
@ -204,7 +209,7 @@ func (ibc *indexBlockCache) MustClose(isBig bool) {
|
|||
|
||||
// cleaner periodically cleans least recently used items.
|
||||
func (ibc *indexBlockCache) cleaner() {
|
||||
ticker := time.NewTicker(5 * time.Second)
|
||||
ticker := time.NewTicker(30 * time.Second)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
|
@ -220,8 +225,8 @@ func (ibc *indexBlockCache) cleanByTimeout() {
|
|||
currentTime := fasttime.UnixTimestamp()
|
||||
ibc.mu.Lock()
|
||||
for k, ibe := range ibc.m {
|
||||
// Delete items accessed more than a minute ago.
|
||||
if currentTime-atomic.LoadUint64(&ibe.lastAccessTime) > 60 {
|
||||
// Delete items accessed more than two minutes ago.
|
||||
if currentTime-atomic.LoadUint64(&ibe.lastAccessTime) > 2*60 {
|
||||
delete(ibc.m, k)
|
||||
}
|
||||
}
|
||||
|
@ -286,3 +291,13 @@ func (ibc *indexBlockCache) Len() uint64 {
|
|||
ibc.mu.Unlock()
|
||||
return n
|
||||
}
|
||||
|
||||
func (ibc *indexBlockCache) SizeBytes() uint64 {
|
||||
n := 0
|
||||
ibc.mu.Lock()
|
||||
for _, e := range ibc.m {
|
||||
n += e.ib.SizeBytes()
|
||||
}
|
||||
ibc.mu.Unlock()
|
||||
return uint64(n)
|
||||
}
|
||||
|
|
|
@ -307,13 +307,15 @@ func newPartition(name, smallPartsPath, bigPartsPath string, getDeletedMetricIDs
|
|||
type partitionMetrics struct {
|
||||
PendingRows uint64
|
||||
|
||||
BigIndexBlocksCacheSize uint64
|
||||
BigIndexBlocksCacheRequests uint64
|
||||
BigIndexBlocksCacheMisses uint64
|
||||
BigIndexBlocksCacheSize uint64
|
||||
BigIndexBlocksCacheSizeBytes uint64
|
||||
BigIndexBlocksCacheRequests uint64
|
||||
BigIndexBlocksCacheMisses uint64
|
||||
|
||||
SmallIndexBlocksCacheSize uint64
|
||||
SmallIndexBlocksCacheRequests uint64
|
||||
SmallIndexBlocksCacheMisses uint64
|
||||
SmallIndexBlocksCacheSize uint64
|
||||
SmallIndexBlocksCacheSizeBytes uint64
|
||||
SmallIndexBlocksCacheRequests uint64
|
||||
SmallIndexBlocksCacheMisses uint64
|
||||
|
||||
BigSizeBytes uint64
|
||||
SmallSizeBytes uint64
|
||||
|
@ -360,6 +362,7 @@ func (pt *partition) UpdateMetrics(m *partitionMetrics) {
|
|||
p := pw.p
|
||||
|
||||
m.BigIndexBlocksCacheSize += p.ibCache.Len()
|
||||
m.BigIndexBlocksCacheSizeBytes += p.ibCache.SizeBytes()
|
||||
m.BigIndexBlocksCacheRequests += p.ibCache.Requests()
|
||||
m.BigIndexBlocksCacheMisses += p.ibCache.Misses()
|
||||
m.BigRowsCount += p.ph.RowsCount
|
||||
|
@ -372,6 +375,7 @@ func (pt *partition) UpdateMetrics(m *partitionMetrics) {
|
|||
p := pw.p
|
||||
|
||||
m.SmallIndexBlocksCacheSize += p.ibCache.Len()
|
||||
m.SmallIndexBlocksCacheSizeBytes += p.ibCache.SizeBytes()
|
||||
m.SmallIndexBlocksCacheRequests += p.ibCache.Requests()
|
||||
m.SmallIndexBlocksCacheMisses += p.ibCache.Misses()
|
||||
m.SmallRowsCount += p.ph.RowsCount
|
||||
|
|
|
@ -115,7 +115,7 @@ func (rrm *rawRowsMarshaler) marshalToInmemoryPart(mp *inmemoryPart, rows []rawR
|
|||
|
||||
rrm.auxValues, scale = decimal.AppendFloatToDecimal(rrm.auxValues[:0], rrm.auxFloatValues)
|
||||
tmpBlock.Init(tsid, rrm.auxTimestamps, rrm.auxValues, scale, precisionBits)
|
||||
rrm.bsw.WriteExternalBlock(tmpBlock, ph, &rowsMerged)
|
||||
rrm.bsw.WriteExternalBlock(tmpBlock, ph, &rowsMerged, false)
|
||||
|
||||
tsid = &r.TSID
|
||||
precisionBits = r.PrecisionBits
|
||||
|
@ -125,7 +125,7 @@ func (rrm *rawRowsMarshaler) marshalToInmemoryPart(mp *inmemoryPart, rows []rawR
|
|||
|
||||
rrm.auxValues, scale = decimal.AppendFloatToDecimal(rrm.auxValues[:0], rrm.auxFloatValues)
|
||||
tmpBlock.Init(tsid, rrm.auxTimestamps, rrm.auxValues, scale, precisionBits)
|
||||
rrm.bsw.WriteExternalBlock(tmpBlock, ph, &rowsMerged)
|
||||
rrm.bsw.WriteExternalBlock(tmpBlock, ph, &rowsMerged, false)
|
||||
if rowsMerged != uint64(len(rows)) {
|
||||
logger.Panicf("BUG: unexpected rowsMerged; got %d; want %d", rowsMerged, len(rows))
|
||||
}
|
||||
|
|
|
@ -108,6 +108,9 @@ type Storage struct {
|
|||
// which may be in the process of flushing to disk by concurrently running
|
||||
// snapshot process.
|
||||
snapshotLock sync.Mutex
|
||||
|
||||
// The minimum timestamp when composite index search can be used.
|
||||
minTimestampForCompositeIndex int64
|
||||
}
|
||||
|
||||
// OpenStorage opens storage on the given path with the given retentionMsecs.
|
||||
|
@ -126,14 +129,9 @@ func OpenStorage(path string, retentionMsecs int64) (*Storage, error) {
|
|||
|
||||
stop: make(chan struct{}),
|
||||
}
|
||||
|
||||
if err := fs.MkdirAllIfNotExist(path); err != nil {
|
||||
return nil, fmt.Errorf("cannot create a directory for the storage at %q: %w", path, err)
|
||||
}
|
||||
snapshotsPath := path + "/snapshots"
|
||||
if err := fs.MkdirAllIfNotExist(snapshotsPath); err != nil {
|
||||
return nil, fmt.Errorf("cannot create %q: %w", snapshotsPath, err)
|
||||
}
|
||||
|
||||
// Protect from concurrent opens.
|
||||
flockF, err := fs.CreateFlockFile(path)
|
||||
|
@ -142,6 +140,12 @@ func OpenStorage(path string, retentionMsecs int64) (*Storage, error) {
|
|||
}
|
||||
s.flockF = flockF
|
||||
|
||||
// Pre-create snapshots directory if it is missing.
|
||||
snapshotsPath := path + "/snapshots"
|
||||
if err := fs.MkdirAllIfNotExist(snapshotsPath); err != nil {
|
||||
return nil, fmt.Errorf("cannot create %q: %w", snapshotsPath, err)
|
||||
}
|
||||
|
||||
// Load caches.
|
||||
mem := memory.Allowed()
|
||||
s.tsidCache = s.mustLoadCache("MetricName->TSID", "metricName_tsid", mem/3)
|
||||
|
@ -163,13 +167,21 @@ func OpenStorage(path string, retentionMsecs int64) (*Storage, error) {
|
|||
|
||||
s.prefetchedMetricIDs.Store(&uint64set.Set{})
|
||||
|
||||
// Load metadata
|
||||
metadataDir := path + "/metadata"
|
||||
isEmptyDB := !fs.IsPathExist(path + "/indexdb")
|
||||
if err := fs.MkdirAllIfNotExist(metadataDir); err != nil {
|
||||
return nil, fmt.Errorf("cannot create %q: %w", metadataDir, err)
|
||||
}
|
||||
s.minTimestampForCompositeIndex = mustGetMinTimestampForCompositeIndex(metadataDir, isEmptyDB)
|
||||
|
||||
// Load indexdb
|
||||
idbPath := path + "/indexdb"
|
||||
idbSnapshotsPath := idbPath + "/snapshots"
|
||||
if err := fs.MkdirAllIfNotExist(idbSnapshotsPath); err != nil {
|
||||
return nil, fmt.Errorf("cannot create %q: %w", idbSnapshotsPath, err)
|
||||
}
|
||||
idbCurr, idbPrev, err := openIndexDBTables(idbPath, s.metricIDCache, s.metricNameCache, s.tsidCache)
|
||||
idbCurr, idbPrev, err := openIndexDBTables(idbPath, s.metricIDCache, s.metricNameCache, s.tsidCache, s.minTimestampForCompositeIndex)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot open indexdb tables at %q: %w", idbPath, err)
|
||||
}
|
||||
|
@ -235,7 +247,7 @@ func (s *Storage) CreateSnapshot() (string, error) {
|
|||
}
|
||||
fs.MustSyncPath(dstDataDir)
|
||||
|
||||
idbSnapshot := fmt.Sprintf("%s/indexdb/snapshots/%s", s.path, snapshotName)
|
||||
idbSnapshot := fmt.Sprintf("%s/indexdb/snapshots/%s", srcDir, snapshotName)
|
||||
idb := s.idb()
|
||||
currSnapshot := idbSnapshot + "/" + idb.name
|
||||
if err := idb.tb.CreateSnapshotAt(currSnapshot); err != nil {
|
||||
|
@ -253,8 +265,13 @@ func (s *Storage) CreateSnapshot() (string, error) {
|
|||
return "", fmt.Errorf("cannot create symlink from %q to %q: %w", idbSnapshot, dstIdbDir, err)
|
||||
}
|
||||
|
||||
srcMetadataDir := srcDir + "/metadata"
|
||||
dstMetadataDir := dstDir + "/metadata"
|
||||
if err := fs.CopyDirectory(srcMetadataDir, dstMetadataDir); err != nil {
|
||||
return "", fmt.Errorf("cannot copy metadata: %s", err)
|
||||
}
|
||||
|
||||
fs.MustSyncPath(dstDir)
|
||||
fs.MustSyncPath(srcDir + "/snapshots")
|
||||
|
||||
logger.Infof("created Storage snapshot for %q at %q in %.3f seconds", srcDir, dstDir, time.Since(startTime).Seconds())
|
||||
return snapshotName, nil
|
||||
|
@ -537,7 +554,7 @@ func (s *Storage) mustRotateIndexDB() {
|
|||
// Create new indexdb table.
|
||||
newTableName := nextIndexDBTableName()
|
||||
idbNewPath := s.path + "/indexdb/" + newTableName
|
||||
idbNew, err := openIndexDB(idbNewPath, s.metricIDCache, s.metricNameCache, s.tsidCache)
|
||||
idbNew, err := openIndexDB(idbNewPath, s.metricIDCache, s.metricNameCache, s.tsidCache, s.minTimestampForCompositeIndex)
|
||||
if err != nil {
|
||||
logger.Panicf("FATAL: cannot create new indexDB at %q: %s", idbNewPath, err)
|
||||
}
|
||||
|
@ -758,6 +775,45 @@ func marshalUint64Set(dst []byte, m *uint64set.Set) []byte {
|
|||
return dst
|
||||
}
|
||||
|
||||
func mustGetMinTimestampForCompositeIndex(metadataDir string, isEmptyDB bool) int64 {
|
||||
path := metadataDir + "/minTimestampForCompositeIndex"
|
||||
minTimestamp, err := loadMinTimestampForCompositeIndex(path)
|
||||
if err == nil {
|
||||
return minTimestamp
|
||||
}
|
||||
if !os.IsNotExist(err) {
|
||||
logger.Errorf("cannot read minTimestampForCompositeIndex, so trying to re-create it; error: %s", err)
|
||||
}
|
||||
date := time.Now().UnixNano() / 1e6 / msecPerDay
|
||||
if !isEmptyDB {
|
||||
// The current and the next day can already contain non-composite indexes,
|
||||
// so they cannot be queried with composite indexes.
|
||||
date += 2
|
||||
} else {
|
||||
date = 0
|
||||
}
|
||||
minTimestamp = date * msecPerDay
|
||||
dateBuf := encoding.MarshalInt64(nil, minTimestamp)
|
||||
if err := os.RemoveAll(path); err != nil {
|
||||
logger.Fatalf("cannot remove a file with minTimestampForCompositeIndex: %s", err)
|
||||
}
|
||||
if err := fs.WriteFileAtomically(path, dateBuf); err != nil {
|
||||
logger.Fatalf("cannot store minTimestampForCompositeIndex: %s", err)
|
||||
}
|
||||
return minTimestamp
|
||||
}
|
||||
|
||||
func loadMinTimestampForCompositeIndex(path string) (int64, error) {
|
||||
data, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if len(data) != 8 {
|
||||
return 0, fmt.Errorf("unexpected length of %q; got %d bytes; want 8 bytes", path, len(data))
|
||||
}
|
||||
return encoding.UnmarshalInt64(data), nil
|
||||
}
|
||||
|
||||
func (s *Storage) mustLoadCache(info, name string, sizeBytes int) *workingsetcache.Cache {
|
||||
path := s.cachePath + "/" + name
|
||||
logger.Infof("loading %s cache from %q...", info, path)
|
||||
|
@ -1476,7 +1532,7 @@ func (s *Storage) add(rows []rawRow, mrs []MetricRow, precisionBits uint8) ([]ra
|
|||
atomic.AddUint64(&s.slowRowInserts, slowInsertsCount)
|
||||
}
|
||||
if firstWarn != nil {
|
||||
logger.Errorf("warn occurred during rows addition: %s", firstWarn)
|
||||
logger.Warnf("warn occurred during rows addition: %s", firstWarn)
|
||||
}
|
||||
rows = rows[:rowsLen+j]
|
||||
|
||||
|
@ -1573,6 +1629,8 @@ func (s *Storage) updatePerDateData(rows []rawRow) error {
|
|||
prevMetricID uint64
|
||||
)
|
||||
hm := s.currHourMetricIDs.Load().(*hourMetricIDs)
|
||||
hmPrev := s.prevHourMetricIDs.Load().(*hourMetricIDs)
|
||||
hmPrevDate := hmPrev.hour / 24
|
||||
nextDayMetricIDs := &s.nextDayMetricIDs.Load().(*byDateMetricIDEntry).v
|
||||
todayShare16bit := uint64((float64(fasttime.UnixTimestamp()%(3600*24)) / (3600 * 24)) * (1 << 16))
|
||||
type pendingDateMetricID struct {
|
||||
|
@ -1580,6 +1638,8 @@ func (s *Storage) updatePerDateData(rows []rawRow) error {
|
|||
metricID uint64
|
||||
}
|
||||
var pendingDateMetricIDs []pendingDateMetricID
|
||||
var pendingNextDayMetricIDs []uint64
|
||||
var pendingHourEntries []uint64
|
||||
for i := range rows {
|
||||
r := &rows[i]
|
||||
if r.Timestamp != prevTimestamp {
|
||||
|
@ -1588,6 +1648,12 @@ func (s *Storage) updatePerDateData(rows []rawRow) error {
|
|||
prevTimestamp = r.Timestamp
|
||||
}
|
||||
metricID := r.TSID.MetricID
|
||||
if metricID == prevMetricID && date == prevDate {
|
||||
// Fast path for bulk import of multiple rows with the same (date, metricID) pairs.
|
||||
continue
|
||||
}
|
||||
prevDate = date
|
||||
prevMetricID = metricID
|
||||
if hour == hm.hour {
|
||||
// The r belongs to the current hour. Check for the current hour cache.
|
||||
if hm.m.Has(metricID) {
|
||||
|
@ -1604,34 +1670,36 @@ func (s *Storage) updatePerDateData(rows []rawRow) error {
|
|||
date: date + 1,
|
||||
metricID: metricID,
|
||||
})
|
||||
s.pendingNextDayMetricIDsLock.Lock()
|
||||
s.pendingNextDayMetricIDs.Add(metricID)
|
||||
s.pendingNextDayMetricIDsLock.Unlock()
|
||||
pendingNextDayMetricIDs = append(pendingNextDayMetricIDs, metricID)
|
||||
}
|
||||
continue
|
||||
}
|
||||
s.pendingHourEntriesLock.Lock()
|
||||
s.pendingHourEntries.Add(metricID)
|
||||
s.pendingHourEntriesLock.Unlock()
|
||||
pendingHourEntries = append(pendingHourEntries, metricID)
|
||||
if date == hmPrevDate && hmPrev.m.Has(metricID) {
|
||||
// The metricID is already registered for the current day on the previous hour.
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Slower path: check global cache for (date, metricID) entry.
|
||||
if metricID == prevMetricID && date == prevDate {
|
||||
// Fast path for bulk import of multiple rows with the same (date, metricID) pairs.
|
||||
if s.dateMetricIDCache.Has(date, metricID) {
|
||||
continue
|
||||
}
|
||||
prevDate = date
|
||||
prevMetricID = metricID
|
||||
|
||||
if !s.dateMetricIDCache.Has(date, metricID) {
|
||||
// Slow path: store the (date, metricID) entry in the indexDB.
|
||||
// It is OK if the (date, metricID) entry is added multiple times to db
|
||||
// by concurrent goroutines.
|
||||
pendingDateMetricIDs = append(pendingDateMetricIDs, pendingDateMetricID{
|
||||
date: date,
|
||||
metricID: metricID,
|
||||
})
|
||||
}
|
||||
// Slow path: store the (date, metricID) entry in the indexDB.
|
||||
pendingDateMetricIDs = append(pendingDateMetricIDs, pendingDateMetricID{
|
||||
date: date,
|
||||
metricID: metricID,
|
||||
})
|
||||
}
|
||||
if len(pendingNextDayMetricIDs) > 0 {
|
||||
s.pendingNextDayMetricIDsLock.Lock()
|
||||
s.pendingNextDayMetricIDs.AddMulti(pendingNextDayMetricIDs)
|
||||
s.pendingNextDayMetricIDsLock.Unlock()
|
||||
}
|
||||
if len(pendingHourEntries) > 0 {
|
||||
s.pendingHourEntriesLock.Lock()
|
||||
s.pendingHourEntries.AddMulti(pendingHourEntries)
|
||||
s.pendingHourEntriesLock.Unlock()
|
||||
}
|
||||
if len(pendingDateMetricIDs) == 0 {
|
||||
// Fast path - there are no new (date, metricID) entires in rows.
|
||||
|
@ -1654,22 +1722,10 @@ func (s *Storage) updatePerDateData(rows []rawRow) error {
|
|||
is := idb.getIndexSearch(noDeadline)
|
||||
defer idb.putIndexSearch(is)
|
||||
var firstError error
|
||||
prevMetricID = 0
|
||||
prevDate = 0
|
||||
for _, dateMetricID := range pendingDateMetricIDs {
|
||||
date := dateMetricID.date
|
||||
metricID := dateMetricID.metricID
|
||||
if metricID == prevMetricID && date == prevDate {
|
||||
// Fast path for bulk import of multiple rows with the same (date, metricID) pairs.
|
||||
continue
|
||||
}
|
||||
prevDate = date
|
||||
prevMetricID = metricID
|
||||
|
||||
if s.dateMetricIDCache.Has(date, metricID) {
|
||||
// The metricID has been already added to per-day inverted index.
|
||||
continue
|
||||
}
|
||||
dateMetricIDsForCache := make([]dateMetricID, 0, len(pendingDateMetricIDs))
|
||||
for _, dmid := range pendingDateMetricIDs {
|
||||
date := dmid.date
|
||||
metricID := dmid.metricID
|
||||
ok, err := is.hasDateMetricID(date, metricID)
|
||||
if err != nil {
|
||||
if firstError == nil {
|
||||
|
@ -1679,6 +1735,8 @@ func (s *Storage) updatePerDateData(rows []rawRow) error {
|
|||
}
|
||||
if !ok {
|
||||
// The (date, metricID) entry is missing in the indexDB. Add it there.
|
||||
// It is OK if the (date, metricID) entry is added multiple times to db
|
||||
// by concurrent goroutines.
|
||||
if err := is.storeDateMetricID(date, metricID); err != nil {
|
||||
if firstError == nil {
|
||||
firstError = fmt.Errorf("error when storing (date=%d, metricID=%d) in database: %w", date, metricID, err)
|
||||
|
@ -1686,9 +1744,13 @@ func (s *Storage) updatePerDateData(rows []rawRow) error {
|
|||
continue
|
||||
}
|
||||
}
|
||||
// The metric must be added to cache only after it has been successfully added to indexDB.
|
||||
s.dateMetricIDCache.Set(date, metricID)
|
||||
dateMetricIDsForCache = append(dateMetricIDsForCache, dateMetricID{
|
||||
date: date,
|
||||
metricID: metricID,
|
||||
})
|
||||
}
|
||||
// The (date, metricID) entries must be added to cache only after they have been successfully added to indexDB.
|
||||
s.dateMetricIDCache.Store(dateMetricIDsForCache)
|
||||
return firstError
|
||||
}
|
||||
|
||||
|
@ -1771,6 +1833,34 @@ func (dmc *dateMetricIDCache) Has(date, metricID uint64) bool {
|
|||
return ok
|
||||
}
|
||||
|
||||
type dateMetricID struct {
|
||||
date uint64
|
||||
metricID uint64
|
||||
}
|
||||
|
||||
func (dmc *dateMetricIDCache) Store(dmids []dateMetricID) {
|
||||
var prevDate uint64
|
||||
metricIDs := make([]uint64, 0, len(dmids))
|
||||
dmc.mu.Lock()
|
||||
for _, dmid := range dmids {
|
||||
if prevDate == dmid.date {
|
||||
metricIDs = append(metricIDs, dmid.metricID)
|
||||
continue
|
||||
}
|
||||
if len(metricIDs) > 0 {
|
||||
v := dmc.byDateMutable.getOrCreate(prevDate)
|
||||
v.AddMulti(metricIDs)
|
||||
}
|
||||
metricIDs = append(metricIDs[:0], dmid.metricID)
|
||||
prevDate = dmid.date
|
||||
}
|
||||
if len(metricIDs) > 0 {
|
||||
v := dmc.byDateMutable.getOrCreate(prevDate)
|
||||
v.AddMulti(metricIDs)
|
||||
}
|
||||
dmc.mu.Unlock()
|
||||
}
|
||||
|
||||
func (dmc *dateMetricIDCache) Set(date, metricID uint64) {
|
||||
dmc.mu.Lock()
|
||||
v := dmc.byDateMutable.getOrCreate(date)
|
||||
|
@ -1914,7 +2004,7 @@ func (s *Storage) putTSIDToCache(tsid *TSID, metricName []byte) {
|
|||
s.tsidCache.Set(metricName, buf)
|
||||
}
|
||||
|
||||
func openIndexDBTables(path string, metricIDCache, metricNameCache, tsidCache *workingsetcache.Cache) (curr, prev *indexDB, err error) {
|
||||
func openIndexDBTables(path string, metricIDCache, metricNameCache, tsidCache *workingsetcache.Cache, minTimestampForCompositeIndex int64) (curr, prev *indexDB, err error) {
|
||||
if err := fs.MkdirAllIfNotExist(path); err != nil {
|
||||
return nil, nil, fmt.Errorf("cannot create directory %q: %w", path, err)
|
||||
}
|
||||
|
@ -1973,12 +2063,12 @@ func openIndexDBTables(path string, metricIDCache, metricNameCache, tsidCache *w
|
|||
// Open the last two tables.
|
||||
currPath := path + "/" + tableNames[len(tableNames)-1]
|
||||
|
||||
curr, err = openIndexDB(currPath, metricIDCache, metricNameCache, tsidCache)
|
||||
curr, err = openIndexDB(currPath, metricIDCache, metricNameCache, tsidCache, minTimestampForCompositeIndex)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("cannot open curr indexdb table at %q: %w", currPath, err)
|
||||
}
|
||||
prevPath := path + "/" + tableNames[len(tableNames)-2]
|
||||
prev, err = openIndexDB(prevPath, metricIDCache, metricNameCache, tsidCache)
|
||||
prev, err = openIndexDB(prevPath, metricIDCache, metricNameCache, tsidCache, minTimestampForCompositeIndex)
|
||||
if err != nil {
|
||||
curr.MustClose()
|
||||
return nil, nil, fmt.Errorf("cannot open prev indexdb table at %q: %w", prevPath, err)
|
||||
|
|
|
@ -14,6 +14,60 @@ import (
|
|||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
|
||||
)
|
||||
|
||||
// convertToCompositeTagFilterss converts tfss to composite filters.
|
||||
//
|
||||
// This converts `foo{bar="baz",x=~"a.+"}` to `{foo=bar="baz",foo=x=~"a.+"} filter.
|
||||
func convertToCompositeTagFilterss(tfss []*TagFilters) []*TagFilters {
|
||||
tfssNew := make([]*TagFilters, len(tfss))
|
||||
for i, tfs := range tfss {
|
||||
tfssNew[i] = convertToCompositeTagFilters(tfs)
|
||||
}
|
||||
return tfssNew
|
||||
}
|
||||
|
||||
func convertToCompositeTagFilters(tfs *TagFilters) *TagFilters {
|
||||
// Search for metric name filter, which must be used for creating composite filters.
|
||||
var name []byte
|
||||
for _, tf := range tfs.tfs {
|
||||
if len(tf.key) == 0 && !tf.isNegative && !tf.isRegexp {
|
||||
name = tf.value
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(name) == 0 {
|
||||
// There is no metric name filter, so composite filters cannot be created.
|
||||
return tfs
|
||||
}
|
||||
tfsNew := make([]tagFilter, 0, len(tfs.tfs))
|
||||
var compositeKey []byte
|
||||
compositeFilters := 0
|
||||
for _, tf := range tfs.tfs {
|
||||
if len(tf.key) == 0 {
|
||||
if tf.isNegative || tf.isRegexp || string(tf.value) != string(name) {
|
||||
tfsNew = append(tfsNew, tf)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if string(tf.key) == "__graphite__" || bytes.Equal(tf.key, graphiteReverseTagKey) {
|
||||
tfsNew = append(tfsNew, tf)
|
||||
continue
|
||||
}
|
||||
compositeKey = marshalCompositeTagKey(compositeKey[:0], name, tf.key)
|
||||
var tfNew tagFilter
|
||||
if err := tfNew.Init(tfs.commonPrefix, compositeKey, tf.value, tf.isNegative, tf.isRegexp); err != nil {
|
||||
logger.Panicf("BUG: unexpected error when creating composite tag filter for name=%q and key=%q: %s", name, tf.key, err)
|
||||
}
|
||||
tfsNew = append(tfsNew, tfNew)
|
||||
compositeFilters++
|
||||
}
|
||||
if compositeFilters == 0 {
|
||||
return tfs
|
||||
}
|
||||
tfsCompiled := NewTagFilters()
|
||||
tfsCompiled.tfs = tfsNew
|
||||
return tfsCompiled
|
||||
}
|
||||
|
||||
// TagFilters represents filters used for filtering tags.
|
||||
type TagFilters struct {
|
||||
tfs []tagFilter
|
||||
|
@ -313,6 +367,7 @@ func (tf *tagFilter) Init(commonPrefix, key, value []byte, isNegative, isRegexp
|
|||
if tf.isRegexp {
|
||||
prefix, expr = getRegexpPrefix(tf.value)
|
||||
if len(expr) == 0 {
|
||||
tf.value = append(tf.value[:0], prefix...)
|
||||
tf.isRegexp = false
|
||||
}
|
||||
}
|
||||
|
@ -341,6 +396,21 @@ func (tf *tagFilter) Init(commonPrefix, key, value []byte, isNegative, isRegexp
|
|||
return nil
|
||||
}
|
||||
|
||||
func (tf *tagFilter) match(b []byte) (bool, error) {
|
||||
prefix := tf.prefix
|
||||
if !bytes.HasPrefix(b, prefix) {
|
||||
return tf.isNegative, nil
|
||||
}
|
||||
ok, err := tf.matchSuffix(b[len(prefix):])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !ok {
|
||||
return tf.isNegative, nil
|
||||
}
|
||||
return !tf.isNegative, nil
|
||||
}
|
||||
|
||||
func (tf *tagFilter) matchSuffix(b []byte) (bool, error) {
|
||||
// Remove the trailing tagSeparatorChar.
|
||||
if len(b) == 0 || b[len(b)-1] != tagSeparatorChar {
|
||||
|
@ -477,7 +547,9 @@ func getOptimizedReMatchFunc(reMatch func(b []byte) bool, expr string) (func(b [
|
|||
return reMatch, "", reMatchCost
|
||||
}
|
||||
|
||||
// The following & default cost values are returned from BenchmarkOptimizedReMatchCost
|
||||
// These cost values are used for sorting tag filters in ascending order or the required CPU time for execution.
|
||||
//
|
||||
// These values are obtained from BenchmarkOptimizedReMatchCost benchmark.
|
||||
const (
|
||||
fullMatchCost = 1
|
||||
prefixMatchCost = 2
|
||||
|
|
|
@ -6,6 +6,369 @@ import (
|
|||
"testing"
|
||||
)
|
||||
|
||||
func TestConvertToCompositeTagFilters(t *testing.T) {
|
||||
f := func(tfs, resultExpected []TagFilter) {
|
||||
t.Helper()
|
||||
tfsCompiled := NewTagFilters()
|
||||
for _, tf := range tfs {
|
||||
if err := tfsCompiled.Add(tf.Key, tf.Value, tf.IsNegative, tf.IsRegexp); err != nil {
|
||||
t.Fatalf("cannot add tf=%s: %s", tf.String(), err)
|
||||
}
|
||||
}
|
||||
resultCompiled := convertToCompositeTagFilters(tfsCompiled)
|
||||
result := make([]TagFilter, len(resultCompiled.tfs))
|
||||
for i, tf := range resultCompiled.tfs {
|
||||
result[i] = TagFilter{
|
||||
Key: tf.key,
|
||||
Value: tf.value,
|
||||
IsNegative: tf.isNegative,
|
||||
IsRegexp: tf.isRegexp,
|
||||
}
|
||||
}
|
||||
if !reflect.DeepEqual(result, resultExpected) {
|
||||
t.Fatalf("unexpected result;\ngot\n%+v\nwant\n%+v", result, resultExpected)
|
||||
}
|
||||
}
|
||||
|
||||
// Empty filters
|
||||
f(nil, []TagFilter{})
|
||||
|
||||
// A single non-name filter
|
||||
f([]TagFilter{
|
||||
{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("bar"),
|
||||
IsNegative: false,
|
||||
IsRegexp: false,
|
||||
},
|
||||
}, []TagFilter{
|
||||
{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("bar"),
|
||||
IsNegative: false,
|
||||
IsRegexp: false,
|
||||
},
|
||||
})
|
||||
|
||||
// Multiple non-name filters
|
||||
f([]TagFilter{
|
||||
{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("bar"),
|
||||
IsNegative: false,
|
||||
IsRegexp: false,
|
||||
},
|
||||
{
|
||||
Key: []byte("x"),
|
||||
Value: []byte("yy"),
|
||||
IsNegative: true,
|
||||
IsRegexp: false,
|
||||
},
|
||||
}, []TagFilter{
|
||||
{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("bar"),
|
||||
IsNegative: false,
|
||||
IsRegexp: false,
|
||||
},
|
||||
{
|
||||
Key: []byte("x"),
|
||||
Value: []byte("yy"),
|
||||
IsNegative: true,
|
||||
IsRegexp: false,
|
||||
},
|
||||
})
|
||||
|
||||
// A single name filter
|
||||
f([]TagFilter{
|
||||
{
|
||||
Key: nil,
|
||||
Value: []byte("bar"),
|
||||
			IsNegative: false,
			IsRegexp:   false,
		},
	}, []TagFilter{
		{
			Key:        nil,
			Value:      []byte("bar"),
			IsNegative: false,
			IsRegexp:   false,
		},
	})

	// Two name filters
	f([]TagFilter{
		{
			Key:        nil,
			Value:      []byte("bar"),
			IsNegative: false,
			IsRegexp:   false,
		},
		{
			Key:        nil,
			Value:      []byte("baz"),
			IsNegative: false,
			IsRegexp:   false,
		},
	}, []TagFilter{
		{
			Key:        nil,
			Value:      []byte("bar"),
			IsNegative: false,
			IsRegexp:   false,
		},
		{
			Key:        nil,
			Value:      []byte("baz"),
			IsNegative: false,
			IsRegexp:   false,
		},
	})

	// A name filter with non-name filter.
	f([]TagFilter{
		{
			Key:        nil,
			Value:      []byte("bar"),
			IsNegative: false,
			IsRegexp:   false,
		},
		{
			Key:        []byte("foo"),
			Value:      []byte("abc"),
			IsNegative: false,
			IsRegexp:   false,
		},
	}, []TagFilter{
		{
			Key:        []byte("\xfe\x03barfoo"),
			Value:      []byte("abc"),
			IsNegative: false,
			IsRegexp:   false,
		},
	})

	// Two name filters with non-name filter.
	f([]TagFilter{
		{
			Key:        nil,
			Value:      []byte("bar"),
			IsNegative: false,
			IsRegexp:   false,
		},
		{
			Key:        nil,
			Value:      []byte("baz"),
			IsNegative: false,
			IsRegexp:   false,
		},
		{
			Key:        []byte("foo"),
			Value:      []byte("abc"),
			IsNegative: false,
			IsRegexp:   false,
		},
	}, []TagFilter{
		{
			Key:        nil,
			Value:      []byte("baz"),
			IsNegative: false,
			IsRegexp:   false,
		},
		{
			Key:        []byte("\xfe\x03barfoo"),
			Value:      []byte("abc"),
			IsNegative: false,
			IsRegexp:   false,
		},
	})

	// A name filter with negative regexp non-name filter, which can be converted to non-regexp.
	f([]TagFilter{
		{
			Key:        nil,
			Value:      []byte("bar"),
			IsNegative: false,
			IsRegexp:   false,
		},
		{
			Key:        []byte("foo"),
			Value:      []byte("abc"),
			IsNegative: true,
			IsRegexp:   true,
		},
	}, []TagFilter{
		{
			Key:        []byte("\xfe\x03barfoo"),
			Value:      []byte("abc"),
			IsNegative: true,
			IsRegexp:   false,
		},
	})

	// A name filter with negative regexp non-name filter.
	f([]TagFilter{
		{
			Key:        nil,
			Value:      []byte("bar"),
			IsNegative: false,
			IsRegexp:   false,
		},
		{
			Key:        []byte("foo"),
			Value:      []byte("abc.+"),
			IsNegative: true,
			IsRegexp:   true,
		},
	}, []TagFilter{
		{
			Key:        []byte("\xfe\x03barfoo"),
			Value:      []byte("abc.+"),
			IsNegative: true,
			IsRegexp:   true,
		},
	})

	// A name filter with graphite filter.
	f([]TagFilter{
		{
			Key:        nil,
			Value:      []byte("bar"),
			IsNegative: false,
			IsRegexp:   false,
		},
		{
			Key:        []byte("__graphite__"),
			Value:      []byte("foo.*.bar"),
			IsNegative: false,
			IsRegexp:   false,
		},
	}, []TagFilter{
		{
			Key:        nil,
			Value:      []byte("bar"),
			IsNegative: false,
			IsRegexp:   false,
		},
		{
			Key:        []byte("__graphite__"),
			Value:      []byte("foo.*.bar"),
			IsNegative: false,
			IsRegexp:   false,
		},
	})

	// A name filter with non-name filter and a graphite filter.
	f([]TagFilter{
		{
			Key:        nil,
			Value:      []byte("bar"),
			IsNegative: false,
			IsRegexp:   false,
		},
		{
			Key:        []byte("foo"),
			Value:      []byte("abc"),
			IsNegative: true,
			IsRegexp:   true,
		},
		{
			Key:        []byte("__graphite__"),
			Value:      []byte("foo.*.bar"),
			IsNegative: false,
			IsRegexp:   false,
		},
	}, []TagFilter{
		{
			Key:        []byte("\xfe\x03barfoo"),
			Value:      []byte("abc"),
			IsNegative: true,
			IsRegexp:   false,
		},
		{
			Key:        []byte("__graphite__"),
			Value:      []byte("foo.*.bar"),
			IsNegative: false,
			IsRegexp:   false,
		},
	})

	// Regexp name filter, which can be converted to non-regexp, with non-name filter.
	f([]TagFilter{
		{
			Key:        nil,
			Value:      []byte("bar"),
			IsNegative: false,
			IsRegexp:   true,
		},
		{
			Key:        []byte("foo"),
			Value:      []byte("abc"),
			IsNegative: true,
			IsRegexp:   false,
		},
	}, []TagFilter{
		{
			Key:        []byte("\xfe\x03barfoo"),
			Value:      []byte("abc"),
			IsNegative: true,
			IsRegexp:   false,
		},
	})

	// Regexp name filter with non-name filter.
	f([]TagFilter{
		{
			Key:        nil,
			Value:      []byte("bar.+"),
			IsNegative: false,
			IsRegexp:   true,
		},
		{
			Key:        []byte("foo"),
			Value:      []byte("abc"),
			IsNegative: true,
			IsRegexp:   false,
		},
	}, []TagFilter{
		{
			Key:        nil,
			Value:      []byte("bar.+"),
			IsNegative: false,
			IsRegexp:   true,
		},
		{
			Key:        []byte("foo"),
			Value:      []byte("abc"),
			IsNegative: true,
			IsRegexp:   false,
		},
	})

	// Regexp non-name filter, which matches anything.
	f([]TagFilter{
		{
			Key:        nil,
			Value:      []byte("bar"),
			IsNegative: false,
			IsRegexp:   false,
		},
		{
			Key:        []byte("foo"),
			Value:      []byte(".*"),
			IsNegative: false,
			IsRegexp:   true,
		},
	}, []TagFilter{
		{
			Key:        nil,
			Value:      []byte("bar"),
			IsNegative: false,
			IsRegexp:   false,
		},
	})
}
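The expected filters above fold a metric-name filter and an ordinary tag filter into a single filter whose key starts with `\xfe`. The following is a minimal sketch of the composite-key layout implied by the expected value `\xfe\x03barfoo`: a `0xfe` marker byte, the varint-encoded length of the metric name, the name itself, then the tag key. The helper name and the standalone `main` wrapper are illustrative, not part of the change.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// marshalCompositeKey sketches the key layout implied by the expected test
// values above: a 0xfe marker byte, the varint-encoded metric name length,
// the metric name, then the original tag key. The helper name is hypothetical.
func marshalCompositeKey(name, key []byte) []byte {
	dst := []byte{0xfe}
	var lenBuf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(lenBuf[:], uint64(len(name)))
	dst = append(dst, lenBuf[:n]...)
	dst = append(dst, name...)
	return append(dst, key...)
}

func main() {
	// Prints "\xfe\x03barfoo", matching the expected composite key above.
	fmt.Printf("%q\n", marshalCompositeKey([]byte("bar"), []byte("foo")))
}
```

In the expected value, `\x03` is simply the length of `bar`, which is why a longer metric name would change the second byte of the composite key.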

func TestGetCommonPrefix(t *testing.T) {
	f := func(a []string, expectedPrefix string) {
		t.Helper()
@@ -614,8 +977,9 @@ func TestTagFiltersString(t *testing.T) {
 	mustAdd("tag_re", "re.value", false, true)
 	mustAdd("tag_nre", "nre.value", true, true)
 	mustAdd("tag_n", "n_value", true, false)
+	mustAdd("tag_re_graphite", "foo\\.bar", false, true)
 	s := tfs.String()
-	sExpected := `{__name__="metric_name", tag_re=~"re.value", tag_nre!~"nre.value", tag_n!="n_value"}`
+	sExpected := `{__name__="metric_name", tag_re=~"re.value", tag_nre!~"nre.value", tag_n!="n_value", tag_re_graphite="foo.bar"}`
 	if s != sExpected {
 		t.Fatalf("unexpected TagFilters.String(); got %q; want %q", s, sExpected)
 	}
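The new assertion above relies on `TagFilters.String()` rendering a regexp filter as an exact match when its pattern can only match one literal string (`foo\.bar` is printed as `tag_re_graphite="foo.bar"` rather than with `=~`). Below is a minimal usage sketch of that behaviour, assuming a `NewTagFilters()` constructor and the `Add(key, value, isNegative, isRegexp)` signature that `mustAdd` wraps above; it logs the string form instead of asserting an exact value, since it is illustrative only.

```go
package storage

import "testing"

// TestTagFiltersStringLiteralRegexpSketch is an illustrative sketch, not part
// of the original change. It builds the same kind of filter set as the test
// above and logs its string form, so the "=" vs "=~" rendering of a
// literal-only regexp such as `foo\.bar` can be inspected directly.
func TestTagFiltersStringLiteralRegexpSketch(t *testing.T) {
	tfs := NewTagFilters() // assumed constructor, as used elsewhere in this package
	mustAdd := func(key, value string, isNegative, isRegexp bool) {
		t.Helper()
		if err := tfs.Add([]byte(key), []byte(value), isNegative, isRegexp); err != nil {
			t.Fatalf("cannot add filter %q=%q: %s", key, value, err)
		}
	}
	mustAdd("", "metric_name", false, false)            // metric name filter (empty key)
	mustAdd("tag_re_graphite", `foo\.bar`, false, true) // regexp matching a single literal
	t.Logf("TagFilters.String(): %s", tfs.String())
}
```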
52  vendor/cloud.google.com/go/CHANGES.md  (generated, vendored)

@@ -1,6 +1,58 @@
# Changes


## [0.76.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.75.0...v0.76.0) (2021-02-02)


### Features

* **accessapproval:** Migrate the Bazel rules for the libraries published to google-cloud-ruby to use the gapic-generator-ruby instead of the monolith generator. ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e))
* **all:** auto-regenerate gapics ([#3526](https://www.github.com/googleapis/google-cloud-go/issues/3526)) ([ab2af0b](https://www.github.com/googleapis/google-cloud-go/commit/ab2af0b32630dd97f44800f4e273184f887375db))
* **all:** auto-regenerate gapics ([#3539](https://www.github.com/googleapis/google-cloud-go/issues/3539)) ([84d4d8a](https://www.github.com/googleapis/google-cloud-go/commit/84d4d8ae2d3fbf34a4a312a0a2e4062d18caaa3d))
* **all:** auto-regenerate gapics ([#3546](https://www.github.com/googleapis/google-cloud-go/issues/3546)) ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7))
* **all:** auto-regenerate gapics ([#3563](https://www.github.com/googleapis/google-cloud-go/issues/3563)) ([102112a](https://www.github.com/googleapis/google-cloud-go/commit/102112a4e9285a16645aabc89789f613d4f47c9e))
* **all:** auto-regenerate gapics ([#3576](https://www.github.com/googleapis/google-cloud-go/issues/3576)) ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e))
* **all:** auto-regenerate gapics ([#3580](https://www.github.com/googleapis/google-cloud-go/issues/3580)) ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1))
* **all:** auto-regenerate gapics ([#3587](https://www.github.com/googleapis/google-cloud-go/issues/3587)) ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f))
* **all:** auto-regenerate gapics ([#3598](https://www.github.com/googleapis/google-cloud-go/issues/3598)) ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
* **appengine:** start generating apiv1 ([#3561](https://www.github.com/googleapis/google-cloud-go/issues/3561)) ([2b6a3b4](https://www.github.com/googleapis/google-cloud-go/commit/2b6a3b4609e389da418a83eb60a8ae3710d646d7))
* **assuredworkloads:** updated google.cloud.assuredworkloads.v1beta1.AssuredWorkloadsService service. Clients can now create workloads with US_REGIONAL_ACCESS compliance regime ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
* **binaryauthorization:** start generating apiv1beta1 ([#3562](https://www.github.com/googleapis/google-cloud-go/issues/3562)) ([56e18a6](https://www.github.com/googleapis/google-cloud-go/commit/56e18a64836ab9482528b212eb139f649f7a35c3))
* **channel:** Add Pub/Sub endpoints for Cloud Channel API. ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8))
* **cloudtasks:** introducing field: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats, Task.pull_message and introducing messages: QueueStats PullMessage docs: updates to max burst size description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
* **cloudtasks:** introducing fields: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats and introducing messages: QueueStats docs: updates to AppEngineHttpRequest description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
* **datalabeling:** start generating apiv1beta1 ([#3582](https://www.github.com/googleapis/google-cloud-go/issues/3582)) ([d8a7fee](https://www.github.com/googleapis/google-cloud-go/commit/d8a7feef51d3344fa7e258aba1d9fbdab56dadcf))
* **dataqna:** start generating apiv1alpha ([#3586](https://www.github.com/googleapis/google-cloud-go/issues/3586)) ([24c5b8f](https://www.github.com/googleapis/google-cloud-go/commit/24c5b8f4f45f8cd8b3001b1ca5a8d80e9f3b39d5))
* **dialogflow/cx:** Add new Experiment service docs: minor doc update on redact field in intent.proto and page.proto ([0959f27](https://www.github.com/googleapis/google-cloud-go/commit/0959f27e85efe94d39437ceef0ff62ddceb8e7a7))
* **dialogflow/cx:** added support for test cases and agent validation ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
* **dialogflow/cx:** added support for test cases and agent validation ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f))
* **dialogflow:** add C++ targets for DialogFlow ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7))
* **documentai:** start generating apiv1beta3 ([#3595](https://www.github.com/googleapis/google-cloud-go/issues/3595)) ([5ae21fa](https://www.github.com/googleapis/google-cloud-go/commit/5ae21fa1cfb8b8dacbcd0fc43eee430f7db63102))
* **domains:** start generating apiv1beta1 ([#3632](https://www.github.com/googleapis/google-cloud-go/issues/3632)) ([b8ada6f](https://www.github.com/googleapis/google-cloud-go/commit/b8ada6f197e680d0bb26aa031e6431bc099a3149))
* **godocfx:** include alt documentation link ([#3530](https://www.github.com/googleapis/google-cloud-go/issues/3530)) ([806cdd5](https://www.github.com/googleapis/google-cloud-go/commit/806cdd56fb6fdddd7a6c1354e55e0d1259bd6c8b))
* **internal/gapicgen:** change commit formatting to match standard ([#3500](https://www.github.com/googleapis/google-cloud-go/issues/3500)) ([d1e3d46](https://www.github.com/googleapis/google-cloud-go/commit/d1e3d46c47c425581e2b149c07f8e27ffc373c7e))
* **internal/godocfx:** xref function declarations ([#3615](https://www.github.com/googleapis/google-cloud-go/issues/3615)) ([2bdbb87](https://www.github.com/googleapis/google-cloud-go/commit/2bdbb87a682d799cf5e262a61a3ef1faf41151af))
* **mediatranslation:** start generating apiv1beta1 ([#3636](https://www.github.com/googleapis/google-cloud-go/issues/3636)) ([4129469](https://www.github.com/googleapis/google-cloud-go/commit/412946966cf7f53c51deff1b1cc1a12d62ed0279))
* **memcache:** start generating apiv1 ([#3579](https://www.github.com/googleapis/google-cloud-go/issues/3579)) ([eabf7cf](https://www.github.com/googleapis/google-cloud-go/commit/eabf7cfde7b3a3cc1b35c320ba52e07be9926359))
* **networkconnectivity:** initial generation of apiv1alpha1 ([#3567](https://www.github.com/googleapis/google-cloud-go/issues/3567)) ([adf489a](https://www.github.com/googleapis/google-cloud-go/commit/adf489a536292e3196677621477eae0d52761e7f))
* **orgpolicy:** start generating apiv2 ([#3652](https://www.github.com/googleapis/google-cloud-go/issues/3652)) ([c103847](https://www.github.com/googleapis/google-cloud-go/commit/c1038475779fda3589aa9659d4ad0b703036b531))
* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8))
* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9af529c](https://www.github.com/googleapis/google-cloud-go/commit/9af529c21e98b62c4617f7a7191c307659cf8bb8))
* **recommender:** add bindings for folder/org type resources for protos in recommendations, insights and recommender_service to enable v1 api for folder/org ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
* **recommender:** auto generated cl for enabling v1beta1 folder/org APIs and integration test ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
* **resourcemanager:** start generating apiv2 ([#3575](https://www.github.com/googleapis/google-cloud-go/issues/3575)) ([93d0ebc](https://www.github.com/googleapis/google-cloud-go/commit/93d0ebceb4270351518a13958005bb68f0cace60))
* **secretmanager:** added expire_time and ttl fields to Secret ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1))
* **secretmanager:** added expire_time and ttl fields to Secret ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e))
* **servicecontrol:** start generating apiv1 ([#3644](https://www.github.com/googleapis/google-cloud-go/issues/3644)) ([f84938b](https://www.github.com/googleapis/google-cloud-go/commit/f84938bb4042a5629fd66bda42de028fd833648a))
* **servicemanagement:** start generating apiv1 ([#3614](https://www.github.com/googleapis/google-cloud-go/issues/3614)) ([b96134f](https://www.github.com/googleapis/google-cloud-go/commit/b96134fe91c182237359000cd544af5fec60d7db))


### Bug Fixes

* **datacatalog:** Update PHP package name casing to match the PHP namespace in the proto files ([c7ecf0f](https://www.github.com/googleapis/google-cloud-go/commit/c7ecf0f3f454606b124e52d20af2545b2c68646f))
* **internal/godocfx:** add TOC element for module root package ([#3599](https://www.github.com/googleapis/google-cloud-go/issues/3599)) ([1d6eb23](https://www.github.com/googleapis/google-cloud-go/commit/1d6eb238206fcf8815d88981527ef176851afd7a))
* **profiler:** Force gax to retry in case of certificate errors ([#3178](https://www.github.com/googleapis/google-cloud-go/issues/3178)) ([35dcd72](https://www.github.com/googleapis/google-cloud-go/commit/35dcd725dcd03266ed7439de40c277376b38cd71))

## [0.75.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.74.0...v0.75.0) (2021-01-11)

2  vendor/cloud.google.com/go/README.md  (generated, vendored)

@@ -25,8 +25,6 @@ To install the packages on your system, *do not clone the repo*. Instead:
**NOTE:** Some of these packages are under development, and may occasionally
make backwards-incompatible changes.

**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).

## Supported APIs

| Google API | Status | Package |
19  vendor/cloud.google.com/go/go.mod  (generated, vendored)

@@ -8,17 +8,18 @@ require (
	github.com/golang/protobuf v1.4.3
	github.com/google/go-cmp v0.5.4
	github.com/google/martian/v3 v3.1.0
	github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2
	github.com/google/pprof v0.0.0-20210122040257-d980be63207e
	github.com/googleapis/gax-go/v2 v2.0.5
	github.com/jstemmer/go-junit-report v0.9.1
	go.opencensus.io v0.22.5
	golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5
	golang.org/x/mod v0.4.0 // indirect
	golang.org/x/net v0.0.0-20201224014010-6772e930b67b
	golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5
	golang.org/x/text v0.3.4
	golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e
	google.golang.org/api v0.36.0
	google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7
	google.golang.org/grpc v1.34.0
	golang.org/x/mod v0.4.1 // indirect
	golang.org/x/net v0.0.0-20210119194325-5f4716e94777
	golang.org/x/oauth2 v0.0.0-20210113205817-d3ed898aa8a3
	golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c // indirect
	golang.org/x/text v0.3.5
	golang.org/x/tools v0.1.0
	google.golang.org/api v0.38.0
	google.golang.org/genproto v0.0.0-20210202153253-cf70463f6119
	google.golang.org/grpc v1.35.0
)
160  vendor/cloud.google.com/go/go.sum  (generated, vendored)

@ -14,6 +14,7 @@ cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZ
|
|||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
|
||||
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
|
||||
cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE=
|
||||
|
@ -59,11 +60,13 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
|
|||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
|
@ -144,30 +147,24 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf
|
|||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 h1:Ak8CrdlwwXwAZxzS66vgPt4U8yUZX7JwLvVR58FN5jM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2 h1:LR89qFljJ48s990kEKGsk213yIJDPI4205OKOzbURK8=
|
||||
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210122040257-d980be63207e h1:41CTEDOoUXp+FxbPYuEhth5dE/s+NT1cRuhSoqhBQ1E=
|
||||
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
|
@ -179,14 +176,10 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
|
|||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
|
@ -195,36 +188,27 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
|
|||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522 h1:OeRHuibLsmZkFj773W4LcfAGsSxJgfPONhr8cmO+eLA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979 h1:Agxu5KLo8o7Bb634SVDnhIfpTvxmzUwhbYAzBvXt6h4=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f h1:hX65Cu3JDlGH3uEdK7I99Ii+9kjD6mvnnpfLdEAH0x4=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422 h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac h1:8R1esu+8QioDxo4E4mX6bFztO+dMTM49DNAaWfO5OeY=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI=
|
||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
|
@ -232,27 +216,22 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU
|
|||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0 h1:8pl+sMODzuvGJkmj2W4kZihvVb5mKm8pB/X44PIQHv8=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
|
@ -263,62 +242,46 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL
|
|||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5 h1:WQ8q63x+f/zpC8Ac1s9wLElVoHhm32p6tudrU72n1QA=
|
||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f h1:QBjCr1Fz5kw158VqdE9JfI9cJnl/ymnJWAdMuinqL7Y=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw=
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc=
|
||||
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58 h1:Mj83v+wSRNEar42a/MQgxk9X42TdEmrOl9i+y8WbxLo=
|
||||
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5 h1:Lm4OryKCca1vehdsWogr9N4t7NfZxLbJoc/H0w4K4S4=
|
||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210113205817-d3ed898aa8a3 h1:BaN3BAqnopnKjvl+15DYP6LLrbBHfbfmlFYzmFj/Q9Q=
|
||||
golang.org/x/oauth2 v0.0.0-20210113205817-d3ed898aa8a3/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -328,62 +291,48 @@ golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e h1:hq86ru83GdWTlfQFZGO4nZJTU4Bs2wfHl8oFHRaXsfc=
|
||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25 h1:OKbAoGs4fGM5cPLlVQLZGYkFC8OnOfgo6tt0Smf9XhM=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642 h1:B6caxRw+hozq68X2MY7jEpZh/cr4/aHLv9xU8Kkadrw=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f h1:Fqb3ao1hUmOR3GkUOg/Y+BadLwykBIzs5q8Ez2SbHyc=
|
||||
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3 h1:kzM6+9dur93BcC2kVlYl34cHU+TYZLanmpSJHVMmL64=
|
||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
|
||||
golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138 h1:H3uGjxCR/6Ds0Mjgyp7LMK81+LvmbvWWEnJhzk1Pi9E=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c h1:97SnQk1GYRXJgvwZ8fadnxDOWfKvkNQHH3CtZntPSrM=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 h1:Dh6fw+p6FyRl5x/FvNswO1ji0lIGzm3KP8Y9VkS9PTE=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff h1:On1qIo75ByTwFJ4/W2bIqHcwJ9XAqtSWUs8GwRrIhtc=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
|
@ -404,80 +353,60 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK
|
|||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d h1:lzLdP95xJmMpwQ6LUHwrc5V7js93hTiY7gkznu0BgmY=
|
||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88 h1:4j84u0sokprDu3IdSYHJMmou+YSLflMz8p7yAx/QI4g=
|
||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d h1:szSOL78iTCl0LF1AMjhSWJj8tIM0KixlUUnBtYXsmd8=
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
|
||||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e h1:Z2uDrs8MyXUWJbwGc4V+nGjV4Ygo+oubBbWSVQw21/I=
|
||||
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0 h1:VGGbLNyPF7dvYHhcUGYBBGCRDDK0RRJAI6KCvo0CL+E=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.17.0 h1:0q95w+VuFtv4PAx4PZVQdBMmYbaCHbnfKaEiDIcVyag=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.22.0 h1:J1Pl9P2lnmYFSJvgs70DKELqHNh8CNWXPbud4njEE2s=
|
||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0 h1:cG03eaksBzhfSIk7JRGctfp3lanklcOM/mTGvow7BbQ=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0 h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/api v0.35.0 h1:TBCmTTxUrRDA1iTctnK/fIeitxIZ+TQuaf0j29fmCGo=
|
||||
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
|
||||
google.golang.org/api v0.36.0 h1:l2Nfbl2GPXdWorv+dT2XfinX2jOOw4zv1VhLstx+6rE=
|
||||
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
|
||||
google.golang.org/api v0.38.0 h1:vDyWk6eup8eQAidaZ31sNWIn8tZEL8qpbtGkBD4ytQo=
|
||||
google.golang.org/api v0.38.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51 h1:Ex1mq5jaJof+kRnYi3SlYJ8KKa9Ao3NHyIT5XJ1gF6U=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
|
@ -492,83 +421,60 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG
|
|||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84 h1:pSLkPbrjnPyLDYUO2VM9mDLqo2V6CFBY84lFSZAfoi4=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380 h1:xriR1EgvKfkKxIoU2uUvrMVl+H26359loFFUleSMXFo=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c h1:Lq4llNryJoaVFRmvrIwC/ZHH7tNt4tUYIu8+se2aayY=
|
||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7 h1:n7yjMkxUgbEahYENvAGVlxMUW8TF/KEavLez31znfDw=
|
||||
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
|
||||
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210202153253-cf70463f6119 h1:m9+RjTMas6brUP8DBxSAa/WIPFy7FIhKpvk+9Ppce8E=
|
||||
google.golang.org/genproto v0.0.0-20210202153253-cf70463f6119/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0 h1:T7P4R73V3SSDPhH7WW7ATbfViLtmamH0DKrP3f9AuDI=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs=
|
||||
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.34.0 h1:raiipEjMOIC/TO2AvyTxP25XFdLxNIBwzDh3FM3XztI=
|
||||
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
|
||||
google.golang.org/grpc v1.35.0 h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8=
|
||||
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc h1:TnonUr8u3himcMY0vSh23jFOXA+cnucl1gB6EQTReBI=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a h1:/8zB6iBfHCl1qAnEAWwGPNrUvapuy6CPla1VM0k8hQw=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a h1:LJwr7TCTghdatWv40WobzlKXc9c4s8oGa7QKJUtHhWA=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
|
|
116 vendor/cloud.google.com/go/internal/.repo-metadata-full.json generated vendored
|
@ -5,7 +5,7 @@
|
|||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/accessapproval/apiv1",
|
||||
"release_level": "beta"
|
||||
"release_level": "ga"
|
||||
},
|
||||
"cloud.google.com/go/analytics/admin/apiv1alpha": {
|
||||
"distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha",
|
||||
|
@ -23,6 +23,14 @@
|
|||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/analytics/data/apiv1alpha",
|
||||
"release_level": "alpha"
|
||||
},
|
||||
"cloud.google.com/go/appengine/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/appengine/apiv1",
|
||||
"description": "App Engine Audit Data",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/appengine/apiv1",
|
||||
"release_level": "beta"
|
||||
},
|
||||
"cloud.google.com/go/area120/tables/apiv1alpha1": {
|
||||
"distribution_name": "cloud.google.com/go/area120/tables/apiv1alpha1",
|
||||
"description": "",
|
||||
|
@ -37,7 +45,7 @@
|
|||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/artifactregistry/apiv1beta2",
|
||||
"release_level": "beta"
|
||||
"release_level": "ga"
|
||||
},
|
||||
"cloud.google.com/go/asset/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/asset/apiv1",
|
||||
|
@ -189,7 +197,7 @@
|
|||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/billing/budgets/apiv1",
|
||||
"release_level": "beta"
|
||||
"release_level": "ga"
|
||||
},
|
||||
"cloud.google.com/go/billing/budgets/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/billing/budgets/apiv1beta1",
|
||||
|
@ -199,6 +207,14 @@
|
|||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/billing/budgets/apiv1beta1",
|
||||
"release_level": "beta"
|
||||
},
|
||||
"cloud.google.com/go/binaryauthorization/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/binaryauthorization/apiv1beta1",
|
||||
"description": "Binary Authorization API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/binaryauthorization/apiv1beta1",
|
||||
"release_level": "beta"
|
||||
},
|
||||
"cloud.google.com/go/channel/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/channel/apiv1",
|
||||
"description": "Cloud Channel API",
|
||||
|
@ -271,6 +287,14 @@
|
|||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/datacatalog/apiv1beta1",
|
||||
"release_level": "beta"
|
||||
},
|
||||
"cloud.google.com/go/datalabeling/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/datalabeling/apiv1beta1",
|
||||
"description": "Data Labeling API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/datalabeling/apiv1beta1",
|
||||
"release_level": "beta"
|
||||
},
|
||||
"cloud.google.com/go/dataproc/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/dataproc/apiv1",
|
||||
"description": "Cloud Dataproc API",
|
||||
|
@ -287,6 +311,14 @@
|
|||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1beta2",
|
||||
"release_level": "beta"
|
||||
},
|
||||
"cloud.google.com/go/dataqna/apiv1alpha": {
|
||||
"distribution_name": "cloud.google.com/go/dataqna/apiv1alpha",
|
||||
"description": "Data QnA API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/dataqna/apiv1alpha",
|
||||
"release_level": "alpha"
|
||||
},
|
||||
"cloud.google.com/go/datastore": {
|
||||
"distribution_name": "cloud.google.com/go/datastore",
|
||||
"description": "Cloud Datastore",
|
||||
|
@ -335,6 +367,22 @@
|
|||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/dlp/apiv2",
|
||||
"release_level": "ga"
|
||||
},
|
||||
"cloud.google.com/go/documentai/apiv1beta3": {
|
||||
"distribution_name": "cloud.google.com/go/documentai/apiv1beta3",
|
||||
"description": "Cloud Document AI API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/documentai/apiv1beta3",
|
||||
"release_level": "beta"
|
||||
},
|
||||
"cloud.google.com/go/domains/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/domains/apiv1beta1",
|
||||
"description": "Cloud Domains API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/domains/apiv1beta1",
|
||||
"release_level": "beta"
|
||||
},
|
||||
"cloud.google.com/go/errorreporting": {
|
||||
"distribution_name": "cloud.google.com/go/errorreporting",
|
||||
"description": "Cloud Error Reporting API",
|
||||
|
@ -477,6 +525,22 @@
|
|||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/managedidentities/apiv1",
|
||||
"release_level": "ga"
|
||||
},
|
||||
"cloud.google.com/go/mediatranslation/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/mediatranslation/apiv1beta1",
|
||||
"description": "Media Translation API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/mediatranslation/apiv1beta1",
|
||||
"release_level": "beta"
|
||||
},
|
||||
"cloud.google.com/go/memcache/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/memcache/apiv1",
|
||||
"description": "Cloud Memorystore for Memcached API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/memcache/apiv1",
|
||||
"release_level": "beta"
|
||||
},
|
||||
"cloud.google.com/go/memcache/apiv1beta2": {
|
||||
|
@ -503,6 +567,14 @@
|
|||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/monitoring/dashboard/apiv1",
|
||||
"release_level": "ga"
|
||||
},
|
||||
"cloud.google.com/go/networkconnectivity/apiv1alpha1": {
|
||||
"distribution_name": "cloud.google.com/go/networkconnectivity/apiv1alpha1",
|
||||
"description": "Network Connectivity API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/networkconnectivity/apiv1alpha1",
|
||||
"release_level": "alpha"
|
||||
},
|
||||
"cloud.google.com/go/notebooks/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/notebooks/apiv1beta1",
|
||||
"description": "Notebooks API",
|
||||
|
@ -511,6 +583,14 @@
|
|||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/notebooks/apiv1beta1",
|
||||
"release_level": "beta"
|
||||
},
|
||||
"cloud.google.com/go/orgpolicy/apiv2": {
|
||||
"distribution_name": "cloud.google.com/go/orgpolicy/apiv2",
|
||||
"description": "Organization Policy API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/orgpolicy/apiv2",
|
||||
"release_level": "beta"
|
||||
},
|
||||
"cloud.google.com/go/osconfig/agentendpoint/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1",
|
||||
"description": "OS Config API",
|
||||
|
@ -655,13 +735,21 @@
|
|||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/redis/apiv1beta1",
|
||||
"release_level": "beta"
|
||||
},
|
||||
"cloud.google.com/go/resourcemanager/apiv2": {
|
||||
"distribution_name": "cloud.google.com/go/resourcemanager/apiv2",
|
||||
"description": "Cloud Resource Manager API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/resourcemanager/apiv2",
|
||||
"release_level": "beta"
|
||||
},
|
||||
"cloud.google.com/go/retail/apiv2": {
|
||||
"distribution_name": "cloud.google.com/go/retail/apiv2",
|
||||
"description": "Retail API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/retail/apiv2",
|
||||
"release_level": "beta"
|
||||
"release_level": "ga"
|
||||
},
|
||||
"cloud.google.com/go/rpcreplay": {
|
||||
"distribution_name": "cloud.google.com/go/rpcreplay",
|
||||
|
@ -743,13 +831,21 @@
|
|||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/settings/apiv1beta1",
|
||||
"release_level": "beta"
|
||||
},
|
||||
"cloud.google.com/go/servicecontrol/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/servicecontrol/apiv1",
|
||||
"description": "",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/servicecontrol/apiv1",
|
||||
"release_level": "beta"
|
||||
},
|
||||
"cloud.google.com/go/servicedirectory/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/servicedirectory/apiv1",
|
||||
"description": "Service Directory API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/servicedirectory/apiv1",
|
||||
"release_level": "beta"
|
||||
"release_level": "ga"
|
||||
},
|
||||
"cloud.google.com/go/servicedirectory/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/servicedirectory/apiv1beta1",
|
||||
|
@ -759,6 +855,14 @@
|
|||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/servicedirectory/apiv1beta1",
|
||||
"release_level": "beta"
|
||||
},
|
||||
"cloud.google.com/go/servicemanagement/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/servicemanagement/apiv1",
|
||||
"description": "",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/servicemanagement/apiv1",
|
||||
"release_level": "beta"
|
||||
},
|
||||
"cloud.google.com/go/spanner": {
|
||||
"distribution_name": "cloud.google.com/go/spanner",
|
||||
"description": "Cloud Spanner",
|
||||
|
@ -933,7 +1037,7 @@
|
|||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/websecurityscanner/apiv1",
|
||||
"release_level": "beta"
|
||||
"release_level": "ga"
|
||||
},
|
||||
"cloud.google.com/go/workflows/apiv1beta": {
|
||||
"distribution_name": "cloud.google.com/go/workflows/apiv1beta",
|
||||
|
|
13 vendor/cloud.google.com/go/storage/CHANGES.md generated vendored
@@ -1,5 +1,18 @@
# Changes

## [1.13.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.12.0...v1.13.0) (2021-02-03)


### Features

* **storage:** add missing StorageClass in BucketAttrsToUpdate ([#3038](https://www.github.com/googleapis/google-cloud-go/issues/3038)) ([2fa1b72](https://www.github.com/googleapis/google-cloud-go/commit/2fa1b727f8a7b20aa62fe0990530744f6c109be0))
* **storage:** add projection parameter for BucketHandle.Objects() ([#3549](https://www.github.com/googleapis/google-cloud-go/issues/3549)) ([9b9c3dc](https://www.github.com/googleapis/google-cloud-go/commit/9b9c3dce3ee10af5b6c4d070821bf47a861efd5b))


### Bug Fixes

* **storage:** fix endpoint selection logic ([#3172](https://www.github.com/googleapis/google-cloud-go/issues/3172)) ([99edf0d](https://www.github.com/googleapis/google-cloud-go/commit/99edf0d211a9e617f2586fbc83b6f9630da3c537))

## v1.12.0
- V4 signed URL fixes:
  - Fix encoding of spaces in query parameters.
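The two feature entries above cover the new StorageClass field on BucketAttrsToUpdate and the projection parameter for object listings. As a minimal sketch of the first one, assuming only the public cloud.google.com/go/storage API from this release and a placeholder bucket name (the toRawBucket hunk in the bucket.go diff below is what forwards this field):

package main

import (
    "context"
    "log"

    "cloud.google.com/go/storage"
)

func main() {
    ctx := context.Background()
    client, err := storage.NewClient(ctx)
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    // Change only the bucket's default storage class; "my-bucket" and
    // "NEARLINE" are placeholder values taken from the doc comment above.
    attrs, err := client.Bucket("my-bucket").Update(ctx, storage.BucketAttrsToUpdate{
        StorageClass: "NEARLINE",
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("default storage class is now %s", attrs.StorageClass)
}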
4 vendor/cloud.google.com/go/storage/README.md generated vendored
@@ -1,8 +1,8 @@
## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage)
## Cloud Storage [![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/storage.svg)](https://pkg.go.dev/cloud.google.com/go/storage)

- [About Cloud Storage](https://cloud.google.com/storage/)
- [API documentation](https://cloud.google.com/storage/docs)
- [Go client documentation](https://godoc.org/cloud.google.com/go/storage)
- [Go client documentation](https://pkg.go.dev/cloud.google.com/go/storage)
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage)

### Example Usage
17 vendor/cloud.google.com/go/storage/bucket.go generated vendored
@@ -134,7 +134,7 @@ func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
//
// name must consist entirely of valid UTF-8-encoded runes. The full specification
// for valid object names can be found at:
// https://cloud.google.com/storage/docs/bucket-naming
// https://cloud.google.com/storage/docs/naming-objects
func (b *BucketHandle) Object(name string) *ObjectHandle {
    return &ObjectHandle{
        c: b.c,
@@ -661,6 +661,14 @@ type BucketAttrsToUpdate struct {
    // for more information.
    UniformBucketLevelAccess *UniformBucketLevelAccess

    // StorageClass is the default storage class of the bucket. This defines
    // how objects in the bucket are stored and determines the SLA
    // and the cost of storage. Typical values are "STANDARD", "NEARLINE",
    // "COLDLINE" and "ARCHIVE". Defaults to "STANDARD".
    // See https://cloud.google.com/storage/docs/storage-classes for all
    // valid values.
    StorageClass string

    // If set, updates the retention policy of the bucket. Using
    // RetentionPolicy.RetentionPeriod = 0 will delete the existing policy.
    //
@@ -801,6 +809,7 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
        rb.DefaultObjectAcl = nil
        rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl")
    }
    rb.StorageClass = ua.StorageClass
    if ua.setLabels != nil || ua.deleteLabels != nil {
        rb.Labels = map[string]string{}
        for k, v := range ua.setLabels {
@@ -1195,7 +1204,11 @@ func (it *ObjectIterator) Next() (*ObjectAttrs, error) {
func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) {
    req := it.bucket.c.raw.Objects.List(it.bucket.name)
    setClientHeader(req.Header())
    req.Projection("full")
    projection := it.query.Projection
    if projection == ProjectionDefault {
        projection = ProjectionFull
    }
    req.Projection(projection.String())
    req.Delimiter(it.query.Delimiter)
    req.Prefix(it.query.Prefix)
    req.StartOffset(it.query.StartOffset)
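The fetch hunk above now honors Query.Projection instead of always requesting the "full" projection. A minimal caller sketch, assuming the public API of this release (ProjectionNoACL ships with these constants even though only ProjectionDefault and ProjectionFull appear in the hunk); bucket name and prefix are placeholders:

package main

import (
    "context"
    "log"

    "cloud.google.com/go/storage"
    "google.golang.org/api/iterator"
)

func main() {
    ctx := context.Background()
    client, err := storage.NewClient(ctx)
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    // ProjectionNoACL is forwarded through fetch above, so per-object ACLs
    // are not requested and listings stay smaller.
    it := client.Bucket("my-bucket").Objects(ctx, &storage.Query{
        Prefix:     "logs/",
        Projection: storage.ProjectionNoACL,
    })
    for {
        attrs, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            log.Fatal(err)
        }
        log.Println(attrs.Name)
    }
}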
18 vendor/cloud.google.com/go/storage/go.mod generated vendored
@@ -3,13 +3,15 @@ module cloud.google.com/go/storage
go 1.11

require (
    cloud.google.com/go v0.66.0
    github.com/golang/protobuf v1.4.2
    github.com/google/go-cmp v0.5.2
    cloud.google.com/go v0.75.0
    github.com/golang/protobuf v1.4.3
    github.com/google/go-cmp v0.5.4
    github.com/googleapis/gax-go/v2 v2.0.5
    golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43
    golang.org/x/tools v0.0.0-20200918232735-d647fc253266 // indirect
    google.golang.org/api v0.32.0
    google.golang.org/genproto v0.0.0-20200921151605-7abf4a1a14d5
    google.golang.org/grpc v1.32.0
    golang.org/x/mod v0.4.1 // indirect
    golang.org/x/oauth2 v0.0.0-20210113205817-d3ed898aa8a3
    golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c // indirect
    golang.org/x/tools v0.1.0 // indirect
    google.golang.org/api v0.38.0
    google.golang.org/genproto v0.0.0-20210203152818-3206188e46ba
    google.golang.org/grpc v1.35.0
)
137 vendor/cloud.google.com/go/storage/go.sum generated vendored
|
@ -21,8 +21,11 @@ cloud.google.com/go v0.62.0 h1:RmDygqvj27Zf3fCQjQRtLyC7KwFcHkeJitcO0OoGOcA=
|
|||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go v0.66.0 h1:DZeAkuQGQqnm9Xv36SbMJEU8aFBz4wL04UpMWPWwjzg=
|
||||
cloud.google.com/go v0.66.0/go.mod h1:dgqGAjKCDxyhGTtC9dAREQGUJpkceNm1yt590Qno0Ko=
|
||||
cloud.google.com/go v0.72.0 h1:eWRCuwubtDrCJG0oSUMgnsbD4CmPFQF2ei4OFbXvwww=
|
||||
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
|
||||
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
|
||||
cloud.google.com/go v0.75.0 h1:XgtDnVJRCPEUG21gjFiRPz4zI1Mjg16R+NYQjfmU4XY=
|
||||
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
|
||||
cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE=
|
||||
|
@ -61,10 +64,14 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
|
|||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
|
@ -103,6 +110,8 @@ github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0
|
|||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
|
@ -118,10 +127,14 @@ github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k=
|
|||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60=
|
||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
|
@ -129,8 +142,11 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
|
|||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
|
@ -138,6 +154,7 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
|
|||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
|
||||
|
@ -151,6 +168,7 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:
|
|||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
|
@ -164,6 +182,8 @@ go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
|
|||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
|
@ -200,6 +220,8 @@ golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9
|
|||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI=
|
||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
|
@ -211,6 +233,10 @@ golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
|
|||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0 h1:8pl+sMODzuvGJkmj2W4kZihvVb5mKm8pB/X44PIQHv8=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
@ -248,8 +274,13 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgN
|
|||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA=
|
||||
golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102 h1:42cLlJJdEh+ySyeUUbEQ5bsTiq8voBeTuweGVkY6Puw=
|
||||
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw=
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
|
||||
|
@ -259,6 +290,12 @@ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BG
|
|||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc=
|
||||
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58 h1:Mj83v+wSRNEar42a/MQgxk9X42TdEmrOl9i+y8WbxLo=
|
||||
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5 h1:Lm4OryKCca1vehdsWogr9N4t7NfZxLbJoc/H0w4K4S4=
|
||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210113205817-d3ed898aa8a3 h1:BaN3BAqnopnKjvl+15DYP6LLrbBHfbfmlFYzmFj/Q9Q=
|
||||
golang.org/x/oauth2 v0.0.0-20210113205817-d3ed898aa8a3/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -270,6 +307,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -307,9 +346,19 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zr
|
|||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642 h1:B6caxRw+hozq68X2MY7jEpZh/cr4/aHLv9xU8Kkadrw=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f h1:Fqb3ao1hUmOR3GkUOg/Y+BadLwykBIzs5q8Ez2SbHyc=
|
||||
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3 h1:kzM6+9dur93BcC2kVlYl34cHU+TYZLanmpSJHVMmL64=
|
||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -317,6 +366,8 @@ golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
|||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
@ -371,55 +422,50 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
|
|||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d h1:szSOL78iTCl0LF1AMjhSWJj8tIM0KixlUUnBtYXsmd8=
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200828161849-5deb26317202/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
|
||||
golang.org/x/tools v0.0.0-20200915173823-2db8f0ff891c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
|
||||
golang.org/x/tools v0.0.0-20200918232735-d647fc253266 h1:k7tVuG0g1JwmD3Jh8oAl1vQ1C3jb4Hi/dUl1wWDBJpQ=
|
||||
golang.org/x/tools v0.0.0-20200918232735-d647fc253266/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
|
||||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.17.0 h1:0q95w+VuFtv4PAx4PZVQdBMmYbaCHbnfKaEiDIcVyag=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.19.0 h1:GwFK8+l5/gdsOYKz5p6M4UK+QT8OvmHWZPJCnf+5DjA=
|
||||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.22.0 h1:J1Pl9P2lnmYFSJvgs70DKELqHNh8CNWXPbud4njEE2s=
|
||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0 h1:cG03eaksBzhfSIk7JRGctfp3lanklcOM/mTGvow7BbQ=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0 h1:jMF5hhVfMkTZwHW1SDpKq5CkgWLXOb31Foaca9Zr3oM=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0 h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/api v0.31.0/go.mod h1:CL+9IBCa2WWU6gRuBWaKqGWLFFwbEUXkfeMkHLQWYWo=
|
||||
google.golang.org/api v0.32.0 h1:Le77IccnTqEa8ryp9wIpX5W3zYm7Gf9LhOp9PHcwFts=
|
||||
google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
|
||||
google.golang.org/api v0.35.0 h1:TBCmTTxUrRDA1iTctnK/fIeitxIZ+TQuaf0j29fmCGo=
|
||||
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
|
||||
google.golang.org/api v0.36.0 h1:l2Nfbl2GPXdWorv+dT2XfinX2jOOw4zv1VhLstx+6rE=
|
||||
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
|
||||
google.golang.org/api v0.38.0 h1:vDyWk6eup8eQAidaZ31sNWIn8tZEL8qpbtGkBD4ytQo=
|
||||
google.golang.org/api v0.38.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
|
@ -427,69 +473,54 @@ google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRn
|
|||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51 h1:Ex1mq5jaJof+kRnYi3SlYJ8KKa9Ao3NHyIT5XJ1gF6U=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba h1:pRj9OXZbwNtbtZtOB4dLwfK4u+EVRMvP+e9zKkg2grM=
|
||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150 h1:VPpdpQkGvFicX9yo4G5oxZPi9ALBnEOZblPSa/Wa2m4=
|
||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90 h1:7THRSvPuzF1bql5kyFzX0JM0vpGhwuhskgJrJsbZ80Y=
|
||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce h1:1mbrb1tUU+Zmt5C94IGKADBTJZjZXAd+BubWi7r9EiI=
|
||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383 h1:Vo0fD5w0fUKriWlZLyrim2GXbumyN0D6euW79T9PgEE=
|
||||
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171 h1:xes2Q2k+d/+YNXVw0FpZkIDJiaux4OVrRKXRAzH6A0U=
|
||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672 h1:jiDSspVssiikoRPFHT6pYrL+CL6/yIc3b9AuHO/4xik=
|
||||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940 h1:MRHtG0U6SnaUb+s+LhNE1qt1FQ1wlhqr5E4usBKC0uA=
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84 h1:pSLkPbrjnPyLDYUO2VM9mDLqo2V6CFBY84lFSZAfoi4=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c h1:Lq4llNryJoaVFRmvrIwC/ZHH7tNt4tUYIu8+se2aayY=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200831141814-d751682dd103/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200914193844-75d14daec038/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200921151605-7abf4a1a14d5 h1:B9nroC8SSX5GtbVvxPF9tYIVkaCpjhVLOrlAY8ONzm8=
google.golang.org/genproto v0.0.0-20200921151605-7abf4a1a14d5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210203152818-3206188e46ba h1:np3A9jnmE/eMtrOwwvUycmQ1XoLyj5nqZ41bAyYLqJ0=
google.golang.org/genproto v0.0.0-20210203152818-3206188e46ba/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0 h1:T7P4R73V3SSDPhH7WW7ATbfViLtmamH0DKrP3f9AuDI=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.32.0 h1:zWTV+LMdc3kaiJMSTOFz2UgSBgx8RNQoTGiZu3fR9S0=
google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0 h1:raiipEjMOIC/TO2AvyTxP25XFdLxNIBwzDh3FM3XztI=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0 h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
80 vendor/cloud.google.com/go/storage/storage.go generated vendored
@@ -43,6 +43,7 @@ import (
    "cloud.google.com/go/internal/version"
    "google.golang.org/api/googleapi"
    "google.golang.org/api/option"
    "google.golang.org/api/option/internaloption"
    raw "google.golang.org/api/storage/v1"
    htransport "google.golang.org/api/transport/http"
)
@@ -105,41 +106,48 @@ type Client struct {
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
    var host, readHost, scheme string

    // In general, it is recommended to use raw.NewService instead of htransport.NewClient
    // since raw.NewService configures the correct default endpoints when initializing the
    // internal http client. However, in our case, "NewRangeReader" in reader.go needs to
    // access the http client directly to make requests, so we create the client manually
    // here so it can be re-used by both reader.go and raw.NewService. This means we need to
    // manually configure the default endpoint options on the http client. Furthermore, we
    // need to account for STORAGE_EMULATOR_HOST override when setting the default endpoints.
    if host = os.Getenv("STORAGE_EMULATOR_HOST"); host == "" {
        scheme = "https"
        readHost = "storage.googleapis.com"

        // Prepend default options to avoid overriding options passed by the user.
        opts = append([]option.ClientOption{option.WithScopes(ScopeFullControl), option.WithUserAgent(userAgent)}, opts...)

        opts = append(opts, internaloption.WithDefaultEndpoint("https://storage.googleapis.com/storage/v1/"))
        opts = append(opts, internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/"))
    } else {
        scheme = "http"
        readHost = host

        opts = append([]option.ClientOption{option.WithoutAuthentication()}, opts...)

        opts = append(opts, internaloption.WithDefaultEndpoint(host))
        opts = append(opts, internaloption.WithDefaultMTLSEndpoint(host))
    }

    // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint.
    hc, ep, err := htransport.NewClient(ctx, opts...)
    if err != nil {
        return nil, fmt.Errorf("dialing: %v", err)
    }
    rawService, err := raw.NewService(ctx, option.WithHTTPClient(hc))
    // RawService should be created with the chosen endpoint to take account of user override.
    rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc))
    if err != nil {
        return nil, fmt.Errorf("storage client: %v", err)
    }
    if ep == "" {
        // Override the default value for BasePath from the raw client.
        // TODO: remove when the raw client uses this endpoint as its default (~end of 2020)
        rawService.BasePath = "https://storage.googleapis.com/storage/v1/"
    } else {
        // If the endpoint has been set explicitly, use this for the BasePath
        // as well as readHost
        rawService.BasePath = ep
        u, err := url.Parse(ep)
        if err != nil {
            return nil, fmt.Errorf("supplied endpoint %v is not valid: %v", ep, err)
        }
        readHost = u.Host
        // Update readHost with the chosen endpoint.
        u, err := url.Parse(ep)
        if err != nil {
            return nil, fmt.Errorf("supplied endpoint %q is not valid: %v", ep, err)
        }
        readHost = u.Host

    return &Client{
        hc: hc,
@@ -821,8 +829,8 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error
    return newObject(obj), nil
}

// Update updates an object with the provided attributes.
// All zero-value attributes are ignored.
// Update updates an object with the provided attributes. See
// ObjectAttrsToUpdate docs for details on treatment of zero values.
// ErrObjectNotExist will be returned if the object is not found.
func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) {
    ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update")
@@ -933,7 +941,8 @@ func (o *ObjectHandle) ObjectName() string {

// ObjectAttrsToUpdate is used to update the attributes of an object.
// Only fields set to non-nil values will be updated.
// Set a field to its zero value to delete it.
// For all fields except CustomTime, set the field to its zero value to delete
// it. CustomTime cannot be deleted or changed to an earlier time once set.
//
// For example, to change ContentType and delete ContentEncoding and
// Metadata, use
@@ -950,8 +959,8 @@ type ObjectAttrsToUpdate struct {
    ContentEncoding    optional.String
    ContentDisposition optional.String
    CacheControl       optional.String
    CustomTime         time.Time
    Metadata           map[string]string // set to map[string]string{} to delete
    CustomTime         time.Time         // Cannot be deleted or backdated from its current value.
    Metadata           map[string]string // Set to map[string]string{} to delete.
    ACL                []ACLRule

    // If not empty, applies a predefined set of access controls. ACL must be nil.
@@ -1221,7 +1230,8 @@ type ObjectAttrs struct {
    // LifecycleConditions to manage object lifecycles.
    //
    // CustomTime cannot be removed once set on an object. It can be updated to a
    // later value but not to an earlier one.
    // later value but not to an earlier one. For more information see
    // https://cloud.google.com/storage/docs/metadata#custom-time .
    CustomTime time.Time
}
@@ -1298,6 +1308,31 @@ func encodeUint32(u uint32) string {
    return base64.StdEncoding.EncodeToString(b)
}

// Projection is enumerated type for Query.Projection.
type Projection int

const (
    // ProjectionDefault returns all fields of objects.
    ProjectionDefault Projection = iota

    // ProjectionFull returns all fields of objects.
    ProjectionFull

    // ProjectionNoACL returns all fields of objects except for Owner and ACL.
    ProjectionNoACL
)

func (p Projection) String() string {
    switch p {
    case ProjectionFull:
        return "full"
    case ProjectionNoACL:
        return "noAcl"
    default:
        return ""
    }
}

// Query represents a query to filter objects from a bucket.
type Query struct {
    // Delimiter returns results in a directory-like fashion.
@@ -1333,6 +1368,11 @@ type Query struct {
    // lexicographically before endOffset. If startOffset is also set, the objects
    // listed will have names between startOffset (inclusive) and endOffset (exclusive).
    EndOffset string

    // Projection defines the set of properties to return. It will default to ProjectionFull,
    // which returns all properties. Passing ProjectionNoACL will omit Owner and ACL,
    // which may improve performance when listing many objects.
    Projection Projection
}

// attrToFieldMap maps the field names of ObjectAttrs to the underlying field
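Editor's note: the NewClient change above routes both the HTTP client and the raw service through the same resolved endpoint and honors STORAGE_EMULATOR_HOST. A minimal usage sketch, assuming a local emulator listening on localhost:9023 (the address is an assumption, not part of this commit):

```go
package main

import (
	"context"
	"log"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	// With STORAGE_EMULATOR_HOST set, NewClient skips authentication and
	// points BasePath/readHost at the emulator, per the logic shown above.
	os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9023")

	client, err := storage.NewClient(context.Background())
	if err != nil {
		log.Fatalf("storage.NewClient: %v", err)
	}
	defer client.Close()
}
```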
6 vendor/github.com/VictoriaMetrics/metrics/metrics.go generated vendored
@@ -58,11 +58,17 @@ func WritePrometheus(w io.Writer, exposeProcessMetrics bool) {
//	metrics.WriteProcessMetrics(w)
//	})
//
// See also WrteFDMetrics.
func WriteProcessMetrics(w io.Writer) {
    writeGoMetrics(w)
    writeProcessMetrics(w)
}

// WriteFDMetrics writes `process_max_fds` and `process_open_fds` metrics to w.
func WriteFDMetrics(w io.Writer) {
    writeFDMetrics(w)
}

// UnregisterMetric removes metric with the given name from default set.
func UnregisterMetric(name string) bool {
    return defaultSet.UnregisterMetric(name)
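Editor's note: WriteProcessMetrics and WriteFDMetrics (added above) are meant to be called from an HTTP handler alongside WritePrometheus. A short sketch; the `/metrics` path and listen address are assumptions:

```go
package main

import (
	"net/http"

	"github.com/VictoriaMetrics/metrics"
)

func main() {
	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		metrics.WritePrometheus(w, false) // user-defined metrics
		metrics.WriteProcessMetrics(w)    // go_* and process_* metrics
		metrics.WriteFDMetrics(w)         // process_max_fds and process_open_fds
	})
	http.ListenAndServe(":8428", nil)
}
```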
69 vendor/github.com/VictoriaMetrics/metrics/process_metrics_linux.go generated vendored
@@ -6,6 +6,9 @@ import (
    "io"
    "io/ioutil"
    "log"
    "os"
    "strconv"
    "strings"
    "time"
)
@@ -81,3 +84,69 @@ func writeProcessMetrics(w io.Writer) {
}

var startTimeSeconds = time.Now().Unix()

// WriteFDMetrics writes process_max_fds and process_open_fds metrics to w.
func writeFDMetrics(w io.Writer) {
    totalOpenFDs, err := getOpenFDsCount("/proc/self/fd")
    if err != nil {
        log.Printf("ERROR: cannot determine open file descriptors count: %s", err)
        return
    }
    maxOpenFDs, err := getMaxFilesLimit("/proc/self/limits")
    if err != nil {
        log.Printf("ERROR: cannot determine the limit on open file descritors: %s", err)
        return
    }
    fmt.Fprintf(w, "process_max_fds %d\n", maxOpenFDs)
    fmt.Fprintf(w, "process_open_fds %d\n", totalOpenFDs)
}

func getOpenFDsCount(path string) (uint64, error) {
    f, err := os.Open(path)
    if err != nil {
        return 0, err
    }
    defer f.Close()
    var totalOpenFDs uint64
    for {
        names, err := f.Readdirnames(512)
        if err == io.EOF {
            break
        }
        if err != nil {
            return 0, fmt.Errorf("unexpected error at Readdirnames: %s", err)
        }
        totalOpenFDs += uint64(len(names))
    }
    return totalOpenFDs, nil
}

func getMaxFilesLimit(path string) (uint64, error) {
    data, err := ioutil.ReadFile(path)
    if err != nil {
        return 0, err
    }
    lines := strings.Split(string(data), "\n")
    const prefix = "Max open files"
    for _, s := range lines {
        if !strings.HasPrefix(s, prefix) {
            continue
        }
        text := strings.TrimSpace(s[len(prefix):])
        // Extract soft limit.
        n := strings.IndexByte(text, ' ')
        if n < 0 {
            return 0, fmt.Errorf("cannot extract soft limit from %q", s)
        }
        text = text[:n]
        if text == "unlimited" {
            return 1<<64 - 1, nil
        }
        limit, err := strconv.ParseUint(text, 10, 64)
        if err != nil {
            return 0, fmt.Errorf("cannot parse soft limit from %q: %s", s, err)
        }
        return limit, nil
    }
    return 0, fmt.Errorf("cannot find max open files limit")
}
4 vendor/github.com/VictoriaMetrics/metrics/process_metrics_other.go generated vendored
@@ -9,3 +9,7 @@ import (
func writeProcessMetrics(w io.Writer) {
    // TODO: implement it
}

func writeFDMetrics(w io.Writer) {
    // TODO: implement it.
}
34 vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go generated vendored
@@ -1583,6 +1583,12 @@ var awsPartition = partition{
    Region: "us-east-2",
    },
},
"fips-us-west-1": endpoint{
    Hostname: "cognito-idp-fips.us-west-1.amazonaws.com",
    CredentialScope: credentialScope{
        Region: "us-west-1",
    },
},
"fips-us-west-2": endpoint{
    Hostname: "cognito-idp-fips.us-west-2.amazonaws.com",
    CredentialScope: credentialScope{
@@ -7449,6 +7455,16 @@ var awscnPartition = partition{
    "cn-north-1": endpoint{},
    },
},
"guardduty": service{
    IsRegionalized: boxedTrue,
    Defaults: endpoint{
        Protocols: []string{"https"},
    },
    Endpoints: endpoints{
        "cn-north-1":     endpoint{},
        "cn-northwest-1": endpoint{},
    },
},
"health": service{

    Endpoints: endpoints{
@@ -8323,6 +8339,12 @@ var awsusgovPartition = partition{
    "us-gov-west-1": endpoint{},
    },
},
"connect": service{

    Endpoints: endpoints{
        "us-gov-west-1": endpoint{},
    },
},
"datasync": service{

    Endpoints: endpoints{
@@ -9173,6 +9195,12 @@ var awsusgovPartition = partition{

    Endpoints: endpoints{
        "us-gov-west-1": endpoint{},
        "us-gov-west-1-fips": endpoint{
            Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com",
            CredentialScope: credentialScope{
                Region: "us-gov-west-1",
            },
        },
    },
},
"s3": service{
@@ -9910,6 +9938,12 @@ var awsisoPartition = partition{
    "us-iso-east-1": endpoint{},
    },
},
"outposts": service{

    Endpoints: endpoints{
        "us-iso-east-1": endpoint{},
    },
},
"rds": service{

    Endpoints: endpoints{
10 vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go generated vendored
@@ -102,7 +102,7 @@ func resolveCredsFromProfile(cfg *aws.Config,
    )

    case sharedCfg.hasSSOConfiguration():
        creds = resolveSSOCredentials(cfg, sharedCfg, handlers)
        creds, err = resolveSSOCredentials(cfg, sharedCfg, handlers)

    case len(sharedCfg.CredentialProcess) != 0:
        // Get credentials from CredentialProcess
@@ -155,7 +155,11 @@ func resolveCredsFromProfile(cfg *aws.Config,
    return creds, nil
}

func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers request.Handlers) *credentials.Credentials {
func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers request.Handlers) (*credentials.Credentials, error) {
    if err := sharedCfg.validateSSOConfiguration(); err != nil {
        return nil, err
    }

    cfgCopy := cfg.Copy()
    cfgCopy.Region = &sharedCfg.SSORegion

@@ -167,7 +171,7 @@ func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers req
        sharedCfg.SSOAccountID,
        sharedCfg.SSORoleName,
        sharedCfg.SSOStartURL,
    )
    ), nil
}

// valid credential source values
12 vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go generated vendored
@@ -70,6 +70,8 @@ const (

// sharedConfig represents the configuration fields of the SDK config files.
type sharedConfig struct {
    Profile string

    // Credentials values from the config file. Both aws_access_key_id and
    // aws_secret_access_key must be provided together in the same file to be
    // considered valid. The values will be ignored if not a complete group.
@@ -201,6 +203,8 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
}

func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error {
    cfg.Profile = profile

    // Trim files from the list that don't exist.
    var skippedFiles int
    var profileNotFoundErr error
@@ -365,10 +369,6 @@ func (cfg *sharedConfig) validateCredentialsConfig(profile string) error {
        return err
    }

    if err := cfg.validateSSOConfiguration(profile); err != nil {
        return err
    }

    return nil
}

@@ -409,7 +409,7 @@ func (cfg *sharedConfig) validateCredentialType() error {
    return nil
}

func (cfg *sharedConfig) validateSSOConfiguration(profile string) error {
func (cfg *sharedConfig) validateSSOConfiguration() error {
    if !cfg.hasSSOConfiguration() {
        return nil
    }
@@ -433,7 +433,7 @@ func (cfg *sharedConfig) validateSSOConfiguration(profile string) error {

    if len(missing) > 0 {
        return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
            profile, strings.Join(missing, ", "))
            cfg.Profile, strings.Join(missing, ", "))
    }

    return nil
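Editor's note: the SSO validation above now runs when SSO credentials are actually resolved and reports the profile name stored on sharedConfig. For illustration only, a shared-config profile that would satisfy that check uses the standard AWS SSO keys; the account ID, role, and URL below are placeholders:

```ini
# ~/.aws/config (illustrative values)
[profile dev-sso]
sso_start_url  = https://my-sso-portal.awsapps.com/start
sso_region     = us-east-1
sso_account_id = 123456789012
sso_role_name  = ReadOnly
region         = us-east-1
```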
2 vendor/github.com/aws/aws-sdk-go/aws/version.go generated vendored
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.37.1"
const SDKVersion = "1.37.7"
13 vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go generated vendored
@@ -98,7 +98,7 @@ func endpointHandler(req *request.Request) {
    Request: req,
}

if resReq.IsCrossPartition() {
if len(resReq.Request.ClientInfo.PartitionID) != 0 && resReq.IsCrossPartition() {
    req.Error = s3shared.NewClientPartitionMismatchError(resource,
        req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
    return
@@ -110,11 +110,6 @@ func endpointHandler(req *request.Request) {
    return
}

if resReq.HasCustomEndpoint() {
    req.Error = s3shared.NewInvalidARNWithCustomEndpointError(resource, nil)
    return
}

switch tv := resource.(type) {
case arn.AccessPointARN:
    err = updateRequestAccessPointEndpoint(req, tv)
@@ -155,8 +150,7 @@ func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.Acce
        req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
}

// Ignore the disable host prefix for access points since custom endpoints
// are not supported.
// Ignore the disable host prefix for access points
req.Config.DisableEndpointHostPrefix = aws.Bool(false)

if err := accessPointEndpointBuilder(accessPoint).build(req); err != nil {
@@ -181,8 +175,7 @@ func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint a
        req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
}

// Ignore the disable host prefix for access points since custom endpoints
// are not supported.
// Ignore the disable host prefix for access points
req.Config.DisableEndpointHostPrefix = aws.Bool(false)

if err := outpostAccessPointEndpointBuilder(accessPoint).build(req); err != nil {
42 vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go generated vendored
@@ -22,6 +22,11 @@ const (
    outpostAccessPointPrefixTemplate = accessPointPrefixTemplate + "{" + outpostPrefixLabel + "}."
)

// hasCustomEndpoint returns true if endpoint is a custom endpoint
func hasCustomEndpoint(r *request.Request) bool {
    return len(aws.StringValue(r.Config.Endpoint)) > 0
}

// accessPointEndpointBuilder represents the endpoint builder for access point arn
type accessPointEndpointBuilder arn.AccessPointARN

@@ -55,16 +60,19 @@ func (a accessPointEndpointBuilder) build(req *request.Request) error {
        req.ClientInfo.PartitionID, cfgRegion, err)
}

if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
    return err
}
endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL))

const serviceEndpointLabel = "s3-accesspoint"
if !hasCustomEndpoint(req) {
    if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
        return err
    }
    const serviceEndpointLabel = "s3-accesspoint"

    // dual stack provided by endpoint resolver
    cfgHost := req.HTTPRequest.URL.Host
    if strings.HasPrefix(cfgHost, "s3") {
        req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:]
    // dual stack provided by endpoint resolver
    cfgHost := req.HTTPRequest.URL.Host
    if strings.HasPrefix(cfgHost, "s3") {
        req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:]
    }
}

protocol.HostPrefixBuilder{
@@ -116,14 +124,17 @@ func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error {
        req.ClientInfo.PartitionID, resolveRegion, err)
}

if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
    return err
}
endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL))

// add url host as s3-outposts
cfgHost := req.HTTPRequest.URL.Host
if strings.HasPrefix(cfgHost, endpointsID) {
    req.HTTPRequest.URL.Host = resolveService + cfgHost[len(endpointsID):]
if !hasCustomEndpoint(req) {
    if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
        return err
    }
    // add url host as s3-outposts
    cfgHost := req.HTTPRequest.URL.Host
    if strings.HasPrefix(cfgHost, endpointsID) {
        req.HTTPRequest.URL.Host = resolveService + cfgHost[len(endpointsID):]
    }
}

protocol.HostPrefixBuilder{
@@ -159,7 +170,6 @@ func resolveRegionalEndpoint(r *request.Request, region string, endpointsID stri
}

func updateRequestEndpoint(r *request.Request, endpoint string) (err error) {
    endpoint = endpoints.AddScheme(endpoint, aws.BoolValue(r.Config.DisableSSL))

    r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath)
    if err != nil {
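Editor's note: with hasCustomEndpoint (added above), the builders skip the s3-accesspoint / s3-outposts host rewrite whenever the caller has configured a custom endpoint. A sketch of such a configuration; the endpoint URL is a placeholder:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Requests for access-point ARNs keep this custom endpoint host instead
	// of being rewritten, per the diff above.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:   aws.String("us-west-2"),
		Endpoint: aws.String("https://s3-proxy.internal.example.com"),
	}))
	_ = s3.New(sess)
}
```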
3 vendor/github.com/aws/aws-sdk-go/service/s3/service.go generated vendored
@@ -48,6 +48,9 @@ const (
// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
    c := p.ClientConfig(EndpointsID, cfgs...)
    if c.SigningNameDerived || len(c.SigningName) == 0 {
        c.SigningName = "s3"
    }
    return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}
4 vendor/github.com/prometheus/procfs/Makefile.common generated vendored
@@ -78,7 +78,7 @@ ifneq ($(shell which gotestsum),)
endif
endif

PROMU_VERSION ?= 0.6.0
PROMU_VERSION ?= 0.7.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz

GOLANGCI_LINT :=
@@ -245,10 +245,12 @@ common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"

DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"

.PHONY: common-docker-manifest
common-docker-manifest:
vendor/github.com/prometheus/procfs/SECURITY.md
generated
vendored
Normal file
6
vendor/github.com/prometheus/procfs/SECURITY.md
generated
vendored
Normal file
|
@ -0,0 +1,6 @@
|
|||
# Reporting a security issue
|
||||
|
||||
The Prometheus security policy, including how to report vulnerabilities, can be
|
||||
found here:
|
||||
|
||||
https://prometheus.io/docs/operating/security/
|
17 vendor/github.com/prometheus/procfs/fixtures.ttar generated vendored
@@ -2204,10 +2204,25 @@ Lines: 1
00015c73 00020e76 F0000769 00000000
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/tcp
Lines: 4
  sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
   0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000     0        0 2740 1 ffff88003d3af3c0 100 0 0 10 0
   1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000     0        0 2740 1 ffff88003d3af3c0 100 0 0 10 0
   2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000     0        0 2740 1 ffff88003d3af3c0 100 0 0 10 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/tcp6
Lines: 3
  sl  local_address                         remote_address                        st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode ref pointer drops
1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000   981        0 21040 2 0000000013726323 0
6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000  1000        0 11337031 2 00000000b9256fdd 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/udp
Lines: 4
  sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
   0: 0A000005:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000     0        0 2740 1 ffff88003d3af3c0 100 0 0 10 0
   0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000     0        0 2740 1 ffff88003d3af3c0 100 0 0 10 0
   1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000     0        0 2740 1 ffff88003d3af3c0 100 0 0 10 0
   2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000     0        0 2740 1 ffff88003d3af3c0 100 0 0 10 0
Mode: 644
228 vendor/github.com/prometheus/procfs/net_ip_socket.go generated vendored (new file)
@@ -0,0 +1,228 @@
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// readLimit is used by io.LimitReader while reading the content of the
|
||||
// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
|
||||
// as each line represents a single used socket.
|
||||
// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
|
||||
// With e.g. 150 Byte per line and the maximum number of 65535,
|
||||
// the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP.
|
||||
readLimit = 4294967296 // Byte -> 4 GiB
|
||||
)
|
||||
|
||||
// this contains generic data structures for both udp and tcp sockets
|
||||
type (
|
||||
// NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
|
||||
NetIPSocket []*netIPSocketLine
|
||||
|
||||
// NetIPSocketSummary provides already computed values like the total queue lengths or
|
||||
// the total number of used sockets. In contrast to NetIPSocket it does not collect
|
||||
// the parsed lines into a slice.
|
||||
NetIPSocketSummary struct {
|
||||
// TxQueueLength shows the total queue length of all parsed tx_queue lengths.
|
||||
TxQueueLength uint64
|
||||
// RxQueueLength shows the total queue length of all parsed rx_queue lengths.
|
||||
RxQueueLength uint64
|
||||
// UsedSockets shows the total number of parsed lines representing the
|
||||
// number of used sockets.
|
||||
UsedSockets uint64
|
||||
}
|
||||
|
||||
// netIPSocketLine represents the fields parsed from a single line
|
||||
// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
|
||||
// For the proc file format details, see https://linux.die.net/man/5/proc.
|
||||
netIPSocketLine struct {
|
||||
Sl uint64
|
||||
LocalAddr net.IP
|
||||
LocalPort uint64
|
||||
RemAddr net.IP
|
||||
RemPort uint64
|
||||
St uint64
|
||||
TxQueue uint64
|
||||
RxQueue uint64
|
||||
UID uint64
|
||||
}
|
||||
)
|
||||
|
||||
func newNetIPSocket(file string) (NetIPSocket, error) {
|
||||
f, err := os.Open(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var netIPSocket NetIPSocket
|
||||
|
||||
lr := io.LimitReader(f, readLimit)
|
||||
s := bufio.NewScanner(lr)
|
||||
s.Scan() // skip first line with headers
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
line, err := parseNetIPSocketLine(fields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
netIPSocket = append(netIPSocket, line)
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return netIPSocket, nil
|
||||
}
|
||||
|
||||
// newNetIPSocketSummary creates a new NetIPSocket{,6} from the contents of the given file.
|
||||
func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
|
||||
f, err := os.Open(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var netIPSocketSummary NetIPSocketSummary
|
||||
|
||||
lr := io.LimitReader(f, readLimit)
|
||||
s := bufio.NewScanner(lr)
|
||||
s.Scan() // skip first line with headers
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
line, err := parseNetIPSocketLine(fields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
netIPSocketSummary.TxQueueLength += line.TxQueue
|
||||
netIPSocketSummary.RxQueueLength += line.RxQueue
|
||||
netIPSocketSummary.UsedSockets++
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &netIPSocketSummary, nil
|
||||
}
|
||||
|
||||
// the /proc/net/{t,u}dp{,6} files are network byte order for ipv4 and for ipv6 the address is four words consisting of four bytes each. In each of those four words the four bytes are written in reverse order.
|
||||
|
||||
func parseIP(hexIP string) (net.IP, error) {
|
||||
var byteIP []byte
|
||||
byteIP, err := hex.DecodeString(hexIP)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse address field in socket line: %s", hexIP)
|
||||
}
|
||||
switch len(byteIP) {
|
||||
case 4:
|
||||
return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil
|
||||
case 16:
|
||||
i := net.IP{
|
||||
byteIP[3], byteIP[2], byteIP[1], byteIP[0],
|
||||
byteIP[7], byteIP[6], byteIP[5], byteIP[4],
|
||||
byteIP[11], byteIP[10], byteIP[9], byteIP[8],
|
||||
byteIP[15], byteIP[14], byteIP[13], byteIP[12],
|
||||
}
|
||||
return i, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("Unable to parse IP %s", hexIP)
|
||||
}
|
||||
}
|
||||
|
||||
// parseNetIPSocketLine parses a single line, represented by a list of fields.
|
||||
func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
|
||||
line := &netIPSocketLine{}
|
||||
if len(fields) < 8 {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse net socket line as it has less then 8 columns: %s",
|
||||
strings.Join(fields, " "),
|
||||
)
|
||||
}
|
||||
var err error // parse error
|
||||
|
||||
// sl
|
||||
s := strings.Split(fields[0], ":")
|
||||
if len(s) != 2 {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse sl field in socket line: %s", fields[0])
|
||||
}
|
||||
|
||||
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
|
||||
return nil, fmt.Errorf("cannot parse sl value in socket line: %s", err)
|
||||
}
|
||||
// local_address
|
||||
l := strings.Split(fields[1], ":")
|
||||
if len(l) != 2 {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse local_address field in socket line: %s", fields[1])
|
||||
}
|
||||
if line.LocalAddr, err = parseIP(l[0]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse local_address port value in socket line: %s", err)
|
||||
}
|
||||
|
||||
// remote_address
|
||||
r := strings.Split(fields[2], ":")
|
||||
if len(r) != 2 {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse rem_address field in socket line: %s", fields[1])
|
||||
}
|
||||
if line.RemAddr, err = parseIP(r[0]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse rem_address port value in socket line: %s", err)
|
||||
}
|
||||
|
||||
// st
|
||||
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse st value in socket line: %s", err)
|
||||
}
|
||||
|
||||
// tx_queue and rx_queue
|
||||
q := strings.Split(fields[4], ":")
|
||||
if len(q) != 2 {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse tx/rx queues in socket line as it has a missing colon: %s",
|
||||
fields[4],
|
||||
)
|
||||
}
|
||||
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %s", err)
|
||||
}
|
||||
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %s", err)
|
||||
}
|
||||
|
||||
// uid
|
||||
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse uid value in socket line: %s", err)
|
||||
}
|
||||
|
||||
return line, nil
|
||||
}
|
64 vendor/github.com/prometheus/procfs/net_tcp.go generated vendored (new file)
@@ -0,0 +1,64 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

type (
    // NetTCP represents the contents of /proc/net/tcp{,6} file without the header.
    NetTCP []*netIPSocketLine

    // NetTCPSummary provides already computed values like the total queue lengths or
    // the total number of used sockets. In contrast to NetTCP it does not collect
    // the parsed lines into a slice.
    NetTCPSummary NetIPSocketSummary
)

// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
// read from /proc/net/tcp.
func (fs FS) NetTCP() (NetTCP, error) {
    return newNetTCP(fs.proc.Path("net/tcp"))
}

// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
// read from /proc/net/tcp6.
func (fs FS) NetTCP6() (NetTCP, error) {
    return newNetTCP(fs.proc.Path("net/tcp6"))
}

// NetTCPSummary returns already computed statistics like the total queue lengths
// for TCP datagrams read from /proc/net/tcp.
func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
    return newNetTCPSummary(fs.proc.Path("net/tcp"))
}

// NetTCP6Summary returns already computed statistics like the total queue lengths
// for TCP datagrams read from /proc/net/tcp6.
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
    return newNetTCPSummary(fs.proc.Path("net/tcp6"))
}

// newNetTCP creates a new NetTCP{,6} from the contents of the given file.
func newNetTCP(file string) (NetTCP, error) {
    n, err := newNetIPSocket(file)
    n1 := NetTCP(n)
    return n1, err
}

func newNetTCPSummary(file string) (*NetTCPSummary, error) {
    n, err := newNetIPSocketSummary(file)
    if n == nil {
        return nil, err
    }
    n1 := NetTCPSummary(*n)
    return &n1, err
}
183 vendor/github.com/prometheus/procfs/net_udp.go generated vendored
@@ -13,58 +13,14 @@
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// readLimit is used by io.LimitReader while reading the content of the
|
||||
// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
|
||||
// as each line represents a single used socket.
|
||||
// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
|
||||
// With e.g. 150 Byte per line and the maximum number of 65535,
|
||||
// the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP.
|
||||
readLimit = 4294967296 // Byte -> 4 GiB
|
||||
)
|
||||
|
||||
type (
|
||||
// NetUDP represents the contents of /proc/net/udp{,6} file without the header.
|
||||
NetUDP []*netUDPLine
|
||||
NetUDP []*netIPSocketLine
|
||||
|
||||
// NetUDPSummary provides already computed values like the total queue lengths or
|
||||
// the total number of used sockets. In contrast to NetUDP it does not collect
|
||||
// the parsed lines into a slice.
|
||||
NetUDPSummary struct {
|
||||
// TxQueueLength shows the total queue length of all parsed tx_queue lengths.
|
||||
TxQueueLength uint64
|
||||
// RxQueueLength shows the total queue length of all parsed rx_queue lengths.
|
||||
RxQueueLength uint64
|
||||
// UsedSockets shows the total number of parsed lines representing the
|
||||
// number of used sockets.
|
||||
UsedSockets uint64
|
||||
}
|
||||
|
||||
// netUDPLine represents the fields parsed from a single line
|
||||
// in /proc/net/udp{,6}. Fields which are not used by UDP are skipped.
|
||||
// For the proc file format details, see https://linux.die.net/man/5/proc.
|
||||
netUDPLine struct {
|
||||
Sl uint64
|
||||
LocalAddr net.IP
|
||||
LocalPort uint64
|
||||
RemAddr net.IP
|
||||
RemPort uint64
|
||||
St uint64
|
||||
TxQueue uint64
|
||||
RxQueue uint64
|
||||
UID uint64
|
||||
}
|
||||
NetUDPSummary NetIPSocketSummary
|
||||
)
|
||||
|
||||
// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
|
||||
|
@ -93,137 +49,16 @@ func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
|
|||
|
||||
// newNetUDP creates a new NetUDP{,6} from the contents of the given file.
|
||||
func newNetUDP(file string) (NetUDP, error) {
|
||||
f, err := os.Open(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
netUDP := NetUDP{}
|
||||
|
||||
lr := io.LimitReader(f, readLimit)
|
||||
s := bufio.NewScanner(lr)
|
||||
s.Scan() // skip first line with headers
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
line, err := parseNetUDPLine(fields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
netUDP = append(netUDP, line)
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return netUDP, nil
|
||||
n, err := newNetIPSocket(file)
|
||||
n1 := NetUDP(n)
|
||||
return n1, err
|
||||
}
|
||||
|
||||
// newNetUDPSummary creates a new NetUDP{,6} from the contents of the given file.
|
||||
func newNetUDPSummary(file string) (*NetUDPSummary, error) {
|
||||
f, err := os.Open(file)
|
||||
if err != nil {
|
||||
n, err := newNetIPSocketSummary(file)
|
||||
if n == nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
netUDPSummary := &NetUDPSummary{}
|
||||
|
||||
lr := io.LimitReader(f, readLimit)
|
||||
s := bufio.NewScanner(lr)
|
||||
s.Scan() // skip first line with headers
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
line, err := parseNetUDPLine(fields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
netUDPSummary.TxQueueLength += line.TxQueue
|
||||
netUDPSummary.RxQueueLength += line.RxQueue
|
||||
netUDPSummary.UsedSockets++
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return netUDPSummary, nil
|
||||
}
|
||||
|
||||
// parseNetUDPLine parses a single line, represented by a list of fields.
|
||||
func parseNetUDPLine(fields []string) (*netUDPLine, error) {
|
||||
line := &netUDPLine{}
|
||||
if len(fields) < 8 {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse net udp socket line as it has less then 8 columns: %s",
|
||||
strings.Join(fields, " "),
|
||||
)
|
||||
}
|
||||
var err error // parse error
|
||||
|
||||
// sl
|
||||
s := strings.Split(fields[0], ":")
|
||||
if len(s) != 2 {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse sl field in udp socket line: %s", fields[0])
|
||||
}
|
||||
|
||||
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
|
||||
return nil, fmt.Errorf("cannot parse sl value in udp socket line: %s", err)
|
||||
}
|
||||
// local_address
|
||||
l := strings.Split(fields[1], ":")
|
||||
if len(l) != 2 {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse local_address field in udp socket line: %s", fields[1])
|
||||
}
|
||||
if line.LocalAddr, err = hex.DecodeString(l[0]); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse local_address value in udp socket line: %s", err)
|
||||
}
|
||||
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse local_address port value in udp socket line: %s", err)
|
||||
}
|
||||
|
||||
// remote_address
|
||||
r := strings.Split(fields[2], ":")
|
||||
if len(r) != 2 {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse rem_address field in udp socket line: %s", fields[1])
|
||||
}
|
||||
if line.RemAddr, err = hex.DecodeString(r[0]); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse rem_address value in udp socket line: %s", err)
|
||||
}
|
||||
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse rem_address port value in udp socket line: %s", err)
|
||||
}
|
||||
|
||||
// st
|
||||
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse st value in udp socket line: %s", err)
|
||||
}
|
||||
|
||||
// tx_queue and rx_queue
|
||||
q := strings.Split(fields[4], ":")
|
||||
if len(q) != 2 {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse tx/rx queues in udp socket line as it has a missing colon: %s",
|
||||
fields[4],
|
||||
)
|
||||
}
|
||||
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf("cannot parse tx_queue value in udp socket line: %s", err)
|
||||
}
|
||||
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf("cannot parse rx_queue value in udp socket line: %s", err)
|
||||
}
|
||||
|
||||
// uid
|
||||
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot parse uid value in udp socket line: %s", err)
|
||||
}
|
||||
|
||||
return line, nil
|
||||
n1 := NetUDPSummary(*n)
|
||||
return &n1, err
|
||||
}
|
||||
|
|
2 vendor/github.com/prometheus/procfs/proc_cgroup.go generated vendored
@@ -49,7 +49,7 @@ type Cgroup struct {
func parseCgroupString(cgroupStr string) (*Cgroup, error) {
    var err error

    fields := strings.Split(cgroupStr, ":")
    fields := strings.SplitN(cgroupStr, ":", 3)
    if len(fields) < 3 {
        return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr)
    }
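Editor's note: the switch to strings.SplitN above keeps the cgroup path intact even if the path itself contains a colon. A minimal sketch of the difference; the cgroup line below is a made-up example:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	line := "3:cpu,cpuacct:/kubepods/pod42:extra"
	fmt.Println(strings.Split(line, ":"))     // [3 cpu,cpuacct /kubepods/pod42 extra] - path is cut short
	fmt.Println(strings.SplitN(line, ":", 3)) // [3 cpu,cpuacct /kubepods/pod42:extra] - path kept whole
}
```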
Some files were not shown because too many files have changed in this diff.