vendor: run make vendor-update

commit 4cd1497ac1 (parent cc8427f11b)
76 changed files with 1667 additions and 672 deletions

go.mod (32 changes)

@@ -14,8 +14,8 @@ require (
 	github.com/VictoriaMetrics/metrics v1.24.0
 	github.com/VictoriaMetrics/metricsql v0.61.1
 	github.com/aws/aws-sdk-go-v2 v1.19.0
-	github.com/aws/aws-sdk-go-v2/config v1.18.28
-	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.72
+	github.com/aws/aws-sdk-go-v2/config v1.18.29
+	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.73
 	github.com/aws/aws-sdk-go-v2/service/s3 v1.37.0
 	github.com/bmatcuk/doublestar/v4 v4.6.0
 	github.com/cespare/xxhash/v2 v2.2.0
@@ -25,7 +25,7 @@ require (
 	github.com/googleapis/gax-go/v2 v2.12.0
 	github.com/influxdata/influxdb v1.11.2
 	github.com/klauspost/compress v1.16.7
-	github.com/prometheus/prometheus v0.45.0
+	github.com/prometheus/prometheus v0.46.0
 	github.com/urfave/cli/v2 v2.25.7
 	github.com/valyala/fastjson v1.6.4
 	github.com/valyala/fastrand v1.1.0
@@ -36,13 +36,13 @@ require (
 	golang.org/x/net v0.12.0
 	golang.org/x/oauth2 v0.10.0
 	golang.org/x/sys v0.10.0
-	google.golang.org/api v0.132.0
+	google.golang.org/api v0.134.0
 	gopkg.in/yaml.v2 v2.4.0
 )
 
 require (
 	cloud.google.com/go v0.110.6 // indirect
-	cloud.google.com/go/compute v1.22.0 // indirect
+	cloud.google.com/go/compute v1.23.0 // indirect
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
 	cloud.google.com/go/iam v1.1.1 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect
@@ -50,9 +50,9 @@ require (
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
-	github.com/aws/aws-sdk-go v1.44.302 // indirect
+	github.com/aws/aws-sdk-go v1.44.309 // indirect
 	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.13.27 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.13.28 // indirect
 	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.5 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.29 // indirect
@@ -64,11 +64,11 @@ require (
 	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.4 // indirect
 	github.com/aws/aws-sdk-go-v2/service/sso v1.12.13 // indirect
 	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.13 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.19.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.20.0 // indirect
 	github.com/aws/smithy-go v1.13.5 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
-	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/dennwc/varint v1.0.0 // indirect
 	github.com/fatih/color v1.15.0 // indirect
 	github.com/felixge/httpsnoop v1.0.3 // indirect
@@ -89,18 +89,18 @@ require (
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.19 // indirect
-	github.com/mattn/go-runewidth v0.0.14 // indirect
+	github.com/mattn/go-runewidth v0.0.15 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_golang v1.16.0 // indirect
 	github.com/prometheus/client_model v0.4.0 // indirect
 	github.com/prometheus/common v0.44.0 // indirect
 	github.com/prometheus/common/sigv4 v0.1.0 // indirect
-	github.com/prometheus/procfs v0.11.0 // indirect
+	github.com/prometheus/procfs v0.11.1 // indirect
 	github.com/rivo/uniseg v0.4.4 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/stretchr/testify v1.8.4 // indirect
@@ -120,10 +120,10 @@ require (
 	golang.org/x/time v0.3.0 // indirect
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 // indirect
-	google.golang.org/grpc v1.56.2 // indirect
+	google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e // indirect
+	google.golang.org/grpc v1.57.0 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )

go.sum (118 changes)

@@ -21,8 +21,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.22.0 h1:cB8R6FtUtT1TYGl5R3xuxnW6OUIc/DrT2aiR16TTG7Y=
-cloud.google.com/go/compute v1.22.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
+cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
+cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
 cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
 cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
@@ -63,7 +63,7 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkM
 github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
 github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
 github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
 github.com/VictoriaMetrics/fasthttp v1.2.0 h1:nd9Wng4DlNtaI27WlYh5mGXCJOmee/2c2blTJwfyU9I=
@@ -88,20 +88,20 @@ github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHG
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.44.302 h1:ST3ko6GrJKn3Xi+nAvxjG3uk/V1pW8KC52WLeIxqqNk=
-github.com/aws/aws-sdk-go v1.44.302/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.309 h1:IPJOFBzXekakxmEpDwd4RTKmmBR6LIAiXgNsM51bWbU=
+github.com/aws/aws-sdk-go v1.44.309/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-sdk-go-v2 v1.19.0 h1:klAT+y3pGFBU/qVf1uzwttpBbiuozJYWzNLHioyDJ+k=
 github.com/aws/aws-sdk-go-v2 v1.19.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno=
-github.com/aws/aws-sdk-go-v2/config v1.18.28 h1:TINEaKyh1Td64tqFvn09iYpKiWjmHYrG1fa91q2gnqw=
-github.com/aws/aws-sdk-go-v2/config v1.18.28/go.mod h1:nIL+4/8JdAuNHEjn/gPEXqtnS02Q3NXB/9Z7o5xE4+A=
-github.com/aws/aws-sdk-go-v2/credentials v1.13.27 h1:dz0yr/yR1jweAnsCx+BmjerUILVPQ6FS5AwF/OyG1kA=
-github.com/aws/aws-sdk-go-v2/credentials v1.13.27/go.mod h1:syOqAek45ZXZp29HlnRS/BNgMIW6uiRmeuQsz4Qh2UE=
+github.com/aws/aws-sdk-go-v2/config v1.18.29 h1:yA+bSSRGhBwWuprG9I4VgxfK//NBLZ/0BGOHiV3f9oM=
+github.com/aws/aws-sdk-go-v2/config v1.18.29/go.mod h1:bJT6P8A+KU1qvNMp8aj+/NmaI06Z670dHNoWsrLOgMg=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.28 h1:WM9tEHgoOh5ThJZ042UKnSx7TXGSC/bz63X3fsrQL2o=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.28/go.mod h1:86BSbSeamnVVdr1hPfBZVN8SXM7KxSAZAvhNxVfi8fU=
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.5 h1:kP3Me6Fy3vdi+9uHd7YLr6ewPxRL+PU6y15urfTaamU=
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.5/go.mod h1:Gj7tm95r+QsDoN2Fhuz/3npQvcZbkEf5mL70n3Xfluc=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.72 h1:m0MmP89v1B0t3b8W8rtATU76KNsodak69QtiokHyEvo=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.72/go.mod h1:ylOTxIuoTL+XjH46Omv2iPjHdeGUk3SQ4hxYho4EHMA=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.73 h1:ez9K7LHdBxo+g1ExpUPsWEFfHxzHhiVZriWAJAiZwKc=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.73/go.mod h1:+eJGIv33Lpdk3pbgIzKisFleIOB9dfWuVuipuve5Gsw=
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35 h1:hMUCiE3Zi5AHrRNGf5j985u0WyqI6r2NULhUfo0N/No=
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35/go.mod h1:ipR5PvpSPqIqL5Mi82BxLnfMkHVbmco8kUwO2xrCi0M=
 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.29 h1:yOpYx+FTBdpk/g+sBU6Cb1H0U/TLEcYYp66mYqsPpcc=
@@ -124,8 +124,8 @@ github.com/aws/aws-sdk-go-v2/service/sso v1.12.13 h1:sWDv7cMITPcZ21QdreULwxOOAmE
 github.com/aws/aws-sdk-go-v2/service/sso v1.12.13/go.mod h1:DfX0sWuT46KpcqbMhJ9QWtxAIP1VozkDWf8VAkByjYY=
 github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.13 h1:BFubHS/xN5bjl818QaroN6mQdjneYQ+AOx44KNXlyH4=
 github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.13/go.mod h1:BzqsVVFduubEmzrVtUFQQIQdFqvUItF8XUq2EnS8Wog=
-github.com/aws/aws-sdk-go-v2/service/sts v1.19.3 h1:e5mnydVdCVWxP+5rPAGi2PYxC7u2OZgH1ypC114H04U=
-github.com/aws/aws-sdk-go-v2/service/sts v1.19.3/go.mod h1:yVGZA1CPkmUhBdA039jXNJJG7/6t+G+EBWmFq23xqnY=
+github.com/aws/aws-sdk-go-v2/service/sts v1.20.0 h1:jKmIOO+dFvCPuIhhM8u0Dy3dtd590n2kEDSYiGHoI98=
+github.com/aws/aws-sdk-go-v2/service/sts v1.20.0/go.mod h1:yVGZA1CPkmUhBdA039jXNJJG7/6t+G+EBWmFq23xqnY=
 github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
 github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -154,26 +154,27 @@ github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+g
 github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
 github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
 github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
-github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
-github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg=
+github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
+github.com/docker/docker v24.0.4+incompatible h1:s/LVDftw9hjblvqIeTiGYXBCD95nOEEl7qRsRrIOuQI=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
-github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ=
+github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
-github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA=
+github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v1.0.1 h1:kt9FtLiooDc0vbwTLhdg3dyNX1K9Qwa1EK9LcD4jVUQ=
+github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
 github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
 github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
 github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
@@ -198,9 +199,9 @@ github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
 github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
+github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ=
 github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
-github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
+github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
 github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
@@ -285,31 +286,31 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
 github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
-github.com/gophercloud/gophercloud v1.4.0 h1:RqEu43vaX0lb0LanZr5BylK5ICVxjpFFoc0sxivyuHU=
+github.com/gophercloud/gophercloud v1.5.0 h1:cDN6XFCLKiiqvYpjQLq9AiM7RDRbIC9450WpPH+yvXo=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/consul/api v1.21.0 h1:WMR2JiyuaQWRAMFaOGiYfY4Q4HRpyYRe/oYQofjyduM=
-github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c=
+github.com/hashicorp/consul/api v1.22.0 h1:ydEvDooB/A0c/xpsBd8GSt7P2/zYPBui4KrNip0xGjE=
+github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
-github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I=
+github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
 github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
-github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0=
+github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
 github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
-github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f h1:yxjcAZRuYymIDC0W4IQHgTe9EQdu2BsjPlVmKwyVZT4=
+github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e h1:sr4lujmn9heD030xx/Pd4B/JSmvRhFzuotNXaaV0WLs=
 github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
-github.com/hetznercloud/hcloud-go v1.45.1 h1:nl0OOklFfQT5J6AaNIOhl5Ruh3fhmGmhvZEqHbibVuk=
+github.com/hetznercloud/hcloud-go/v2 v2.0.0 h1:Sg1DJ+MAKvbYAqaBaq9tPbwXBS2ckPIaMtVdUjKu+4g=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
+github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
 github.com/influxdata/influxdb v1.11.2 h1:qOF3uQN1mDfJNEKwbAgJsqehf8IXgKok2vlGm736oGo=
 github.com/influxdata/influxdb v1.11.2/go.mod h1:eUMkLTE2vQwvSk6KGMrTBLKPaqSuczuelGbggigMPFw=
-github.com/ionos-cloud/sdk-go/v6 v6.1.7 h1:uVG1Q/ZDJ7YmCI9Oevpue9xJEH5UrUMyXv8gm7NTxIw=
+github.com/ionos-cloud/sdk-go/v6 v6.1.8 h1:493wE/BkZxJf7x79UCE0cYGPZoqQcPiEBALvt7uVGY0=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@@ -342,19 +343,19 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/linode/linodego v1.17.0 h1:aWS98f0jUoY2lhsEuBxRdVkqyGM0nazPd68AEDF0EvU=
+github.com/linode/linodego v1.19.0 h1:n4WJrcr9+30e9JGZ6DI0nZbm5SdAj1kSwvvt/998YUw=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
 github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
 github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
-github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
+github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
 github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI=
+github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -378,8 +379,9 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
@@ -404,10 +406,10 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk=
-github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
-github.com/prometheus/prometheus v0.45.0 h1:O/uG+Nw4kNxx/jDPxmjsSDd+9Ohql6E7ZSY1x5x/0KI=
-github.com/prometheus/prometheus v0.45.0/go.mod h1:jC5hyO8ItJBnDWGecbEucMyXjzxGv9cxsxsjS9u5s1w=
+github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
+github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
+github.com/prometheus/prometheus v0.46.0 h1:9JSdXnsuT6YsbODEhSQMwxNkGwPExfmzqG73vCMk/Kw=
+github.com/prometheus/prometheus v0.46.0/go.mod h1:10L5IJE5CEsjee1FnOcVswYXlPIscDWWt3IJ2UDYrz4=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
 github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -416,7 +418,7 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17 h1:1WuWJu7/e8SqK+uQl7lfk/N/oMZTL2NE/TJsNKRNMc4=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.19 h1:+1H+N9QFl2Sfvia0FBYfMrHYHYhmpZxhSE0wpPL2lYs=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -526,7 +528,7 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
+golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -697,7 +699,7 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc
 golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
+golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -720,8 +722,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
 google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
 google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
 google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.132.0 h1:8t2/+qZ26kAOGSmOiHwVycqVaDg7q3JDILrNi/Z6rvc=
-google.golang.org/api v0.132.0/go.mod h1:AeTBC6GpJnJSRJjktDcPX0QwtS8pGYZOV6MSuSCusw0=
+google.golang.org/api v0.134.0 h1:ktL4Goua+UBgoP1eL1/60LwZJqa1sIzkLmvoR3hR6Gw=
+google.golang.org/api v0.134.0/go.mod h1:sjRL3UnjTx5UqNQS9EWr9N8p7xbHpy1k0XGRLCf3Spk=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -760,12 +762,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
 google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 h1:+VoAg+OKmWaommL56xmZSE2sUK8A7m6SUO7X89F2tbw=
-google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753/go.mod h1:iqkVr8IRpZ53gx1dEnWlCUIEwDWqWARWrbzpasaTNYM=
-google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 h1:lCbbUxUDD+DiXx9Q6F/ttL0aAu7N2pz8XnmMm8ZW4NE=
-google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 h1:XUODHrpzJEUeWmVo/jfNTLj0YyVveOo28oE6vkFbkO4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
+google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e h1:xIXmWJ303kJCuogpj0bHq+dcjcZHU+XFyc1I0Yl9cRg=
+google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108=
+google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw=
+google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e h1:S83+ibolgyZ0bqz7KEsUOPErxcv4VzlszxY+31OfB/E=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -782,8 +784,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
 google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
 google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
-google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI=
-google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
+google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -825,16 +827,16 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ=
-k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ=
-k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI=
+k8s.io/api v0.27.3 h1:yR6oQXXnUEBWEWcvPWS0jQL575KoAboQPfJAuKNrw5Y=
+k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM=
+k8s.io/client-go v0.27.3 h1:7dnEGHZEJld3lYwxvLl7WoehK6lAq7GvgjxpA3nv1E8=
 k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
 k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
-k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg=
-k8s.io/utils v0.0.0-20230308161112-d77c459e9343 h1:m7tbIjXGcGIAtpmQr7/NAi7RsWoW3E7Zcm4jI1HicTc=
+k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 h1:OmK1d0WrkD3IPfkskvroRykOulHVHf0s0ZIFRjyt+UI=
+k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
+sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk=
 sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=

vendor/cloud.google.com/go/compute/internal/version.go (generated, vendored; 2 changes)

@@ -15,4 +15,4 @@
 package internal
 
 // Version is the current tagged release of the library.
-const Version = "1.22.0"
+const Version = "1.23.0"

vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md (generated, vendored; 4 changes)

@@ -1,3 +1,7 @@
+# v1.18.29 (2023-07-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.18.28 (2023-07-13)
 
 * **Dependency Update**: Updated to the latest SDK module versions

vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go (generated, vendored; 2 changes)

@@ -3,4 +3,4 @@
 package config
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.18.28"
+const goModuleVersion = "1.18.29"

vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md (generated, vendored; 4 changes)

@@ -1,3 +1,7 @@
+# v1.13.28 (2023-07-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.13.27 (2023-07-13)
 
 * **Dependency Update**: Updated to the latest SDK module versions

vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go (generated, vendored; 2 changes)

@@ -3,4 +3,4 @@
 package credentials
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.13.27"
+const goModuleVersion = "1.13.28"

vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md (generated, vendored; 4 changes)

@@ -1,3 +1,7 @@
+# v1.11.73 (2023-07-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.11.72 (2023-07-13)
 
 * **Dependency Update**: Updated to the latest SDK module versions

vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go (generated, vendored; 2 changes)

@@ -3,4 +3,4 @@
 package manager
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.11.72"
+const goModuleVersion = "1.11.73"

vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md (generated, vendored; 4 changes)

@@ -1,3 +1,7 @@
+# v1.20.0 (2023-07-25)
+
+* **Feature**: API updates for the AWS Security Token Service
+
 # v1.19.3 (2023-07-13)
 
 * **Dependency Update**: Updated to the latest SDK module versions
vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go (3 changes; generated, vendored)

@@ -199,6 +199,9 @@ type AssumeRoleInput struct {
 	// in the IAM User Guide.
 	PolicyArns []types.PolicyDescriptorType
 
+	// Reserved for future use.
+	ProvidedContexts []types.ProvidedContext
+
 	// The identification number of the MFA device that is associated with the user
 	// who is making the AssumeRole call. Specify this value if the trust policy of
 	// the role being assumed includes a condition that requires MFA authentication.

vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go (3 changes; generated, vendored)

@@ -149,7 +149,8 @@ type AssumeRoleWithWebIdentityInput struct {
 	// The OAuth 2.0 access token or OpenID Connect ID token that is provided by the
 	// identity provider. Your application must get this token by authenticating the
 	// user who is using your application with a web identity provider before the
-	// application makes an AssumeRoleWithWebIdentity call.
+	// application makes an AssumeRoleWithWebIdentity call. Only tokens with RSA
+	// algorithms (RS256) are supported.
 	//
 	// This member is required.
 	WebIdentityToken *string

vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go (2 changes; generated, vendored)

@@ -3,4 +3,4 @@
 package sts
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.19.3"
+const goModuleVersion = "1.20.0"

vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go (36 changes; generated, vendored)

@@ -546,6 +546,35 @@ func awsAwsquery_serializeDocumentPolicyDescriptorType(v *types.PolicyDescriptor
 	return nil
 }
 
+func awsAwsquery_serializeDocumentProvidedContext(v *types.ProvidedContext, value query.Value) error {
+	object := value.Object()
+	_ = object
+
+	if v.ContextAssertion != nil {
+		objectKey := object.Key("ContextAssertion")
+		objectKey.String(*v.ContextAssertion)
+	}
+
+	if v.ProviderArn != nil {
+		objectKey := object.Key("ProviderArn")
+		objectKey.String(*v.ProviderArn)
+	}
+
+	return nil
+}
+
+func awsAwsquery_serializeDocumentProvidedContextsListType(v []types.ProvidedContext, value query.Value) error {
+	array := value.Array("member")
+
+	for i := range v {
+		av := array.Value()
+		if err := awsAwsquery_serializeDocumentProvidedContext(&v[i], av); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 func awsAwsquery_serializeDocumentTag(v *types.Tag, value query.Value) error {
 	object := value.Object()
 	_ = object

@@ -611,6 +640,13 @@ func awsAwsquery_serializeOpDocumentAssumeRoleInput(v *AssumeRoleInput, value qu
 		}
 	}
 
+	if v.ProvidedContexts != nil {
+		objectKey := object.Key("ProvidedContexts")
+		if err := awsAwsquery_serializeDocumentProvidedContextsListType(v.ProvidedContexts, objectKey); err != nil {
+			return err
+		}
+	}
+
 	if v.RoleArn != nil {
 		objectKey := object.Key("RoleArn")
 		objectKey.String(*v.RoleArn)

vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go (12 changes; generated, vendored)

@@ -89,6 +89,18 @@ type PolicyDescriptorType struct {
 	noSmithyDocumentSerde
 }
 
+// Reserved for future use.
+type ProvidedContext struct {
+
+	// Reserved for future use.
+	ContextAssertion *string
+
+	// Reserved for future use.
+	ProviderArn *string
+
+	noSmithyDocumentSerde
+}
+
 // You can pass custom key-value pair attributes when you assume a role or
 // federate a user. These are called session tags. You can then use the session
 // tags to control access to resources. For more information, see Tagging Amazon

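For orientation, here is a minimal usage sketch (not part of the diff) of the new field as exposed by aws-sdk-go-v2; the ARNs, session name, and assertion value are made-up placeholders, and AWS still documents the whole structure as reserved for future use:

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

// buildInput shows where the new ProvidedContexts field sits on
// AssumeRoleInput. All literal values below are placeholders.
func buildInput() *sts.AssumeRoleInput {
	return &sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
		RoleSessionName: aws.String("example-session"),
		ProvidedContexts: []types.ProvidedContext{{
			ProviderArn:      aws.String("arn:aws:iam::aws:contextProvider/example"), // placeholder
			ContextAssertion: aws.String("example-assertion"),                        // placeholder
		}},
	}
}

func main() { _ = buildInput() }
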
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go (139 changes; generated, vendored)

@@ -2660,6 +2660,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-south-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-south-2",
+			}: endpoint{},
 			endpointKey{
 				Region: "ap-southeast-1",
 			}: endpoint{},

@@ -2675,12 +2678,18 @@
 			endpointKey{
 				Region: "eu-central-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "eu-central-2",
+			}: endpoint{},
 			endpointKey{
 				Region: "eu-north-1",
 			}: endpoint{},
 			endpointKey{
 				Region: "eu-south-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "eu-south-2",
+			}: endpoint{},
 			endpointKey{
 				Region: "eu-west-1",
 			}: endpoint{},

@@ -2690,6 +2699,9 @@
 			endpointKey{
 				Region: "eu-west-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "me-central-1",
+			}: endpoint{},
 			endpointKey{
 				Region: "me-south-1",
 			}: endpoint{},

@@ -25178,7 +25190,7 @@
 				Region:  "af-south-1",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.af-south-1.amazonaws.com",
+				Hostname: "servicediscovery.af-south-1.api.aws",
 			},
 			endpointKey{
 				Region: "ap-east-1",

@@ -25187,7 +25199,7 @@
 				Region:  "ap-east-1",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.ap-east-1.amazonaws.com",
+				Hostname: "servicediscovery.ap-east-1.api.aws",
 			},
 			endpointKey{
 				Region: "ap-northeast-1",

@@ -25196,7 +25208,7 @@
 				Region:  "ap-northeast-1",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.ap-northeast-1.amazonaws.com",
+				Hostname: "servicediscovery.ap-northeast-1.api.aws",
 			},
 			endpointKey{
 				Region: "ap-northeast-2",

@@ -25205,7 +25217,7 @@
 				Region:  "ap-northeast-2",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.ap-northeast-2.amazonaws.com",
+				Hostname: "servicediscovery.ap-northeast-2.api.aws",
 			},
 			endpointKey{
 				Region: "ap-northeast-3",

@@ -25214,7 +25226,7 @@
 				Region:  "ap-northeast-3",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.ap-northeast-3.amazonaws.com",
+				Hostname: "servicediscovery.ap-northeast-3.api.aws",
 			},
 			endpointKey{
 				Region: "ap-south-1",

@@ -25223,7 +25235,7 @@
 				Region:  "ap-south-1",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.ap-south-1.amazonaws.com",
+				Hostname: "servicediscovery.ap-south-1.api.aws",
 			},
 			endpointKey{
 				Region: "ap-south-2",

@@ -25232,7 +25244,7 @@
 				Region:  "ap-south-2",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.ap-south-2.amazonaws.com",
+				Hostname: "servicediscovery.ap-south-2.api.aws",
 			},
 			endpointKey{
 				Region: "ap-southeast-1",

@@ -25241,7 +25253,7 @@
 				Region:  "ap-southeast-1",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.ap-southeast-1.amazonaws.com",
+				Hostname: "servicediscovery.ap-southeast-1.api.aws",
 			},
 			endpointKey{
 				Region: "ap-southeast-2",

@@ -25250,7 +25262,7 @@
 				Region:  "ap-southeast-2",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.ap-southeast-2.amazonaws.com",
+				Hostname: "servicediscovery.ap-southeast-2.api.aws",
 			},
 			endpointKey{
 				Region: "ap-southeast-3",

@@ -25259,7 +25271,7 @@
 				Region:  "ap-southeast-3",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.ap-southeast-3.amazonaws.com",
+				Hostname: "servicediscovery.ap-southeast-3.api.aws",
 			},
 			endpointKey{
 				Region: "ap-southeast-4",

@@ -25268,7 +25280,7 @@
 				Region:  "ap-southeast-4",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.ap-southeast-4.amazonaws.com",
+				Hostname: "servicediscovery.ap-southeast-4.api.aws",
 			},
 			endpointKey{
 				Region: "ca-central-1",

@@ -25277,7 +25289,7 @@
 				Region:  "ca-central-1",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.ca-central-1.amazonaws.com",
+				Hostname: "servicediscovery.ca-central-1.api.aws",
 			},
 			endpointKey{
 				Region: "ca-central-1",

@@ -25285,6 +25297,12 @@
 			}: endpoint{
 				Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com",
 			},
+			endpointKey{
+				Region:  "ca-central-1",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{
+				Hostname: "servicediscovery-fips.ca-central-1.api.aws",
+			},
 			endpointKey{
 				Region: "ca-central-1-fips",
 			}: endpoint{

@@ -25301,7 +25319,7 @@
 				Region:  "eu-central-1",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.eu-central-1.amazonaws.com",
+				Hostname: "servicediscovery.eu-central-1.api.aws",
 			},
 			endpointKey{
 				Region: "eu-central-2",

@@ -25310,7 +25328,7 @@
 				Region:  "eu-central-2",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.eu-central-2.amazonaws.com",
+				Hostname: "servicediscovery.eu-central-2.api.aws",
 			},
 			endpointKey{
 				Region: "eu-north-1",

@@ -25319,7 +25337,7 @@
 				Region:  "eu-north-1",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.eu-north-1.amazonaws.com",
+				Hostname: "servicediscovery.eu-north-1.api.aws",
 			},
 			endpointKey{
 				Region: "eu-south-1",

@@ -25328,7 +25346,7 @@
 				Region:  "eu-south-1",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.eu-south-1.amazonaws.com",
+				Hostname: "servicediscovery.eu-south-1.api.aws",
 			},
 			endpointKey{
 				Region: "eu-south-2",

@@ -25337,7 +25355,7 @@
 				Region:  "eu-south-2",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.eu-south-2.amazonaws.com",
+				Hostname: "servicediscovery.eu-south-2.api.aws",
 			},
 			endpointKey{
 				Region: "eu-west-1",

@@ -25346,7 +25364,7 @@
 				Region:  "eu-west-1",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.eu-west-1.amazonaws.com",
+				Hostname: "servicediscovery.eu-west-1.api.aws",
 			},
 			endpointKey{
 				Region: "eu-west-2",

@@ -25355,7 +25373,7 @@
 				Region:  "eu-west-2",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.eu-west-2.amazonaws.com",
+				Hostname: "servicediscovery.eu-west-2.api.aws",
 			},
 			endpointKey{
 				Region: "eu-west-3",

@@ -25364,7 +25382,7 @@
 				Region:  "eu-west-3",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.eu-west-3.amazonaws.com",
+				Hostname: "servicediscovery.eu-west-3.api.aws",
 			},
 			endpointKey{
 				Region: "me-central-1",

@@ -25373,7 +25391,7 @@
 				Region:  "me-central-1",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.me-central-1.amazonaws.com",
+				Hostname: "servicediscovery.me-central-1.api.aws",
 			},
 			endpointKey{
 				Region: "me-south-1",

@@ -25382,7 +25400,7 @@
 				Region:  "me-south-1",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.me-south-1.amazonaws.com",
+				Hostname: "servicediscovery.me-south-1.api.aws",
 			},
 			endpointKey{
 				Region: "sa-east-1",

@@ -25391,7 +25409,7 @@
 				Region:  "sa-east-1",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.sa-east-1.amazonaws.com",
+				Hostname: "servicediscovery.sa-east-1.api.aws",
 			},
 			endpointKey{
 				Region: "us-east-1",

@@ -25400,7 +25418,7 @@
 				Region:  "us-east-1",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.us-east-1.amazonaws.com",
+				Hostname: "servicediscovery.us-east-1.api.aws",
 			},
 			endpointKey{
 				Region: "us-east-1",

@@ -25408,6 +25426,12 @@
 			}: endpoint{
 				Hostname: "servicediscovery-fips.us-east-1.amazonaws.com",
 			},
+			endpointKey{
+				Region:  "us-east-1",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{
+				Hostname: "servicediscovery-fips.us-east-1.api.aws",
+			},
 			endpointKey{
 				Region: "us-east-1-fips",
 			}: endpoint{

@@ -25424,7 +25448,7 @@
 				Region:  "us-east-2",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.us-east-2.amazonaws.com",
+				Hostname: "servicediscovery.us-east-2.api.aws",
 			},
 			endpointKey{
 				Region: "us-east-2",

@@ -25432,6 +25456,12 @@
 			}: endpoint{
 				Hostname: "servicediscovery-fips.us-east-2.amazonaws.com",
 			},
+			endpointKey{
+				Region:  "us-east-2",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{
+				Hostname: "servicediscovery-fips.us-east-2.api.aws",
+			},
 			endpointKey{
 				Region: "us-east-2-fips",
 			}: endpoint{

@@ -25448,7 +25478,7 @@
 				Region:  "us-west-1",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.us-west-1.amazonaws.com",
+				Hostname: "servicediscovery.us-west-1.api.aws",
 			},
 			endpointKey{
 				Region: "us-west-1",

@@ -25456,6 +25486,12 @@
 			}: endpoint{
 				Hostname: "servicediscovery-fips.us-west-1.amazonaws.com",
 			},
+			endpointKey{
+				Region:  "us-west-1",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{
+				Hostname: "servicediscovery-fips.us-west-1.api.aws",
+			},
 			endpointKey{
 				Region: "us-west-1-fips",
 			}: endpoint{

@@ -25472,7 +25508,7 @@
 				Region:  "us-west-2",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.us-west-2.amazonaws.com",
+				Hostname: "servicediscovery.us-west-2.api.aws",
 			},
 			endpointKey{
 				Region: "us-west-2",

@@ -25480,6 +25516,12 @@
 			}: endpoint{
 				Hostname: "servicediscovery-fips.us-west-2.amazonaws.com",
 			},
+			endpointKey{
+				Region:  "us-west-2",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{
+				Hostname: "servicediscovery-fips.us-west-2.api.aws",
+			},
 			endpointKey{
 				Region: "us-west-2-fips",
 			}: endpoint{

@@ -32598,11 +32640,18 @@ var awscnPartition = partition{
 			},
 		},
 		"savingsplans": service{
-			PartitionEndpoint: "aws-cn",
-			IsRegionalized:    boxedFalse,
+			IsRegionalized: boxedTrue,
 			Endpoints: serviceEndpoints{
 				endpointKey{
-					Region: "aws-cn",
+					Region: "cn-north-1",
+				}: endpoint{
+					Hostname: "savingsplans.cn-north-1.amazonaws.com.cn",
+					CredentialScope: credentialScope{
+						Region: "cn-north-1",
+					},
+				},
+				endpointKey{
+					Region: "cn-northwest-1",
 				}: endpoint{
 					Hostname: "savingsplans.cn-northwest-1.amazonaws.com.cn",
 					CredentialScope: credentialScope{

@@ -32669,7 +32718,7 @@
 				Region:  "cn-north-1",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.cn-north-1.amazonaws.com.cn",
+				Hostname: "servicediscovery.cn-north-1.api.amazonwebservices.com.cn",
 			},
 			endpointKey{
 				Region: "cn-northwest-1",

@@ -32678,7 +32727,7 @@
 				Region:  "cn-northwest-1",
 				Variant: dualStackVariant,
 			}: endpoint{
-				Hostname: "servicediscovery.cn-northwest-1.amazonaws.com.cn",
+				Hostname: "servicediscovery.cn-northwest-1.api.amazonwebservices.com.cn",
 			},
 		},
 	},

@@ -37848,20 +37897,6 @@ var awsusgovPartition = partition{
 				}: endpoint{},
 			},
 		},
-		"savingsplans": service{
-			PartitionEndpoint: "aws-us-gov-global",
-			IsRegionalized:    boxedFalse,
-			Endpoints: serviceEndpoints{
-				endpointKey{
-					Region: "aws-us-gov-global",
-				}: endpoint{
-					Hostname: "savingsplans.amazonaws.com",
-					CredentialScope: credentialScope{
-						Region: "us-gov-west-1",
-					},
-				},
-			},
-		},
 		"secretsmanager": service{
 			Endpoints: serviceEndpoints{
 				endpointKey{

@@ -38095,6 +38130,12 @@
 			}: endpoint{
 				Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com",
 			},
+			endpointKey{
+				Region:  "us-gov-east-1",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{
+				Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com",
+			},
 			endpointKey{
 				Region: "us-gov-east-1-fips",
 			}: endpoint{

@@ -38119,6 +38160,12 @@
 			}: endpoint{
 				Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com",
 			},
+			endpointKey{
+				Region:  "us-gov-west-1",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{
+				Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com",
+			},
 			endpointKey{
 				Region: "us-gov-west-1-fips",
 			}: endpoint{

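Most of the churn above moves service-discovery dual-stack hostnames from amazonaws.com to api.aws. A hedged sketch of how a v1-SDK caller would observe that, assuming the DualStackEndpointState option available in recent aws-sdk-go releases:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Resolve the dual-stack variant; after this update it should land on api.aws.
	ep, err := endpoints.DefaultResolver().EndpointFor(
		"servicediscovery", "eu-west-1",
		func(o *endpoints.Options) {
			o.UseDualStackEndpoint = endpoints.DualStackEndpointStateEnabled
		},
	)
	if err == nil {
		fmt.Println(ep.URL) // expected: https://servicediscovery.eu-west-1.api.aws
	}
}
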
vendor/github.com/aws/aws-sdk-go/aws/version.go (2 changes; generated, vendored)

@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.44.302"
+const SDKVersion = "1.44.309"

vendor/github.com/aws/aws-sdk-go/service/sts/api.go (79 changes; generated, vendored)

@@ -1460,6 +1460,9 @@ type AssumeRoleInput struct {
 	// in the IAM User Guide.
 	PolicyArns []*PolicyDescriptorType `type:"list"`
 
+	// Reserved for future use.
+	ProvidedContexts []*ProvidedContext `type:"list"`
+
 	// The Amazon Resource Name (ARN) of the role to assume.
 	//
 	// RoleArn is a required field

@@ -1633,6 +1636,16 @@ func (s *AssumeRoleInput) Validate() error {
 			}
 		}
 	}
+	if s.ProvidedContexts != nil {
+		for i, v := range s.ProvidedContexts {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ProvidedContexts", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
 	if s.Tags != nil {
 		for i, v := range s.Tags {
 			if v == nil {

@@ -1674,6 +1687,12 @@ func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleIn
 	return s
 }
 
+// SetProvidedContexts sets the ProvidedContexts field's value.
+func (s *AssumeRoleInput) SetProvidedContexts(v []*ProvidedContext) *AssumeRoleInput {
+	s.ProvidedContexts = v
+	return s
+}
+
 // SetRoleArn sets the RoleArn field's value.
 func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput {
 	s.RoleArn = &v

@@ -2266,7 +2285,8 @@ type AssumeRoleWithWebIdentityInput struct {
 	// The OAuth 2.0 access token or OpenID Connect ID token that is provided by
 	// the identity provider. Your application must get this token by authenticating
 	// the user who is using your application with a web identity provider before
-	// the application makes an AssumeRoleWithWebIdentity call.
+	// the application makes an AssumeRoleWithWebIdentity call. Only tokens with
+	// RSA algorithms (RS256) are supported.
 	//
 	// WebIdentityToken is a sensitive parameter and its value will be
 	// replaced with "sensitive" in string returned by AssumeRoleWithWebIdentityInput's

@@ -3385,6 +3405,63 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType {
 	return s
 }
 
+// Reserved for future use.
+type ProvidedContext struct {
+	_ struct{} `type:"structure"`
+
+	// Reserved for future use.
+	ContextAssertion *string `min:"4" type:"string"`
+
+	// Reserved for future use.
+	ProviderArn *string `min:"20" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ProvidedContext) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ProvidedContext) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ProvidedContext) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ProvidedContext"}
+	if s.ContextAssertion != nil && len(*s.ContextAssertion) < 4 {
+		invalidParams.Add(request.NewErrParamMinLen("ContextAssertion", 4))
+	}
+	if s.ProviderArn != nil && len(*s.ProviderArn) < 20 {
+		invalidParams.Add(request.NewErrParamMinLen("ProviderArn", 20))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetContextAssertion sets the ContextAssertion field's value.
+func (s *ProvidedContext) SetContextAssertion(v string) *ProvidedContext {
+	s.ContextAssertion = &v
+	return s
+}
+
+// SetProviderArn sets the ProviderArn field's value.
+func (s *ProvidedContext) SetProviderArn(v string) *ProvidedContext {
+	s.ProviderArn = &v
+	return s
+}
+
 // You can pass custom key-value pair attributes when you assume a role or federate
 // a user. These are called session tags. You can then use the session tags
 // to control access to resources. For more information, see Tagging Amazon

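The generated Validate method enforces the min:"4" and min:"20" struct tags above. A small hedged illustration (the values are deliberately-short placeholders):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	pc := &sts.ProvidedContext{}
	pc.SetContextAssertion("abc")  // 3 chars: below the min:"4" constraint
	pc.SetProviderArn("arn:short") // 9 chars: below the min:"20" constraint
	if err := pc.Validate(); err != nil {
		fmt.Println(err) // reports an ErrParamMinLen violation per field
	}
}
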
vendor/github.com/prometheus/procfs/Makefile.common (8 changes; generated, vendored)

@@ -49,19 +49,19 @@ endif
 GOTEST := $(GO) test
 GOTEST_DIR :=
 ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell which gotestsum),)
+ifneq ($(shell command -v gotestsum > /dev/null),)
 	GOTEST_DIR := test-results
 	GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
 endif
 endif
 
-PROMU_VERSION ?= 0.14.0
+PROMU_VERSION ?= 0.15.0
 PROMU_URL     := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
 
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.51.2
+GOLANGCI_LINT_VERSION ?= v1.53.3
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))

@@ -178,7 +178,7 @@ endif
 .PHONY: common-yamllint
 common-yamllint:
 	@echo ">> running yamllint on all YAML files in the repository"
-ifeq (, $(shell which yamllint))
+ifeq (, $(shell command -v yamllint > /dev/null))
 	@echo "yamllint not installed so skipping"
 else
 	yamllint .

vendor/github.com/prometheus/procfs/fs.go (8 changes; generated, vendored)

@@ -20,8 +20,8 @@ import (
 // FS represents the pseudo-filesystem sys, which provides an interface to
 // kernel data structures.
 type FS struct {
 	proc fs.FS
-	real bool
+	isReal bool
 }
 
 // DefaultMountPoint is the common mount point of the proc filesystem.

@@ -41,10 +41,10 @@ func NewFS(mountPoint string) (FS, error) {
 		return FS{}, err
 	}
 
-	real, err := isRealProc(mountPoint)
+	isReal, err := isRealProc(mountPoint)
 	if err != nil {
 		return FS{}, err
 	}
 
-	return FS{fs, real}, nil
+	return FS{fs, isReal}, nil
 }

vendor/github.com/prometheus/procfs/fs_statfs_notype.go (4 changes; generated, vendored)

@@ -11,8 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//go:build netbsd || openbsd || solaris || windows
-// +build netbsd openbsd solaris windows
+//go:build netbsd || openbsd || solaris || windows || nostatfs
+// +build netbsd openbsd solaris windows nostatfs
 
 package procfs

vendor/github.com/prometheus/procfs/fs_statfs_type.go (4 changes; generated, vendored)

@@ -11,8 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//go:build !netbsd && !openbsd && !solaris && !windows
-// +build !netbsd,!openbsd,!solaris,!windows
+//go:build !netbsd && !openbsd && !solaris && !windows && !nostatfs
+// +build !netbsd,!openbsd,!solaris,!windows,!nostatfs
 
 package procfs

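The newly added nostatfs tag lets consumers opt out of statfs(2) even on platforms that support it. A minimal sketch of a file gated the same way; it only compiles under `go build -tags nostatfs`:

//go:build nostatfs

// Hedged sketch: this file is selected by the same constraint mechanism
// the two vendored files above now use, so a consumer can force the
// statfs-free code path with: go build -tags nostatfs ./...
package main

func main() {}
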
vendor/github.com/prometheus/procfs/proc.go (2 changes; generated, vendored)

@@ -244,7 +244,7 @@ func (p Proc) FileDescriptorTargets() ([]string, error) {
 // a process.
 func (p Proc) FileDescriptorsLen() (int, error) {
 	// Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901
-	if p.fs.real {
+	if p.fs.isReal {
 		stat, err := os.Stat(p.path("fd"))
 		if err != nil {
 			return 0, err

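The isReal guard exists because the fast path only works on a real procfs: per the kernel commit linked in the comment, Linux v6.2+ reports the number of open descriptors as the size of /proc/<pid>/fd. A hedged standalone sketch of that idea (fdCountFast is a hypothetical helper, not part of procfs):

package main

import (
	"fmt"
	"os"
)

// fdCountFast returns the open-fd count via a single stat call, assuming
// a real procfs on Linux >= 6.2. On failure the caller should fall back
// to listing the directory entries.
func fdCountFast(pid int) (int, bool) {
	st, err := os.Stat(fmt.Sprintf("/proc/%d/fd", pid))
	if err != nil || st.Size() <= 0 {
		return 0, false
	}
	return int(st.Size()), true
}

func main() {
	if n, ok := fdCountFast(os.Getpid()); ok {
		fmt.Println("open fds:", n)
	}
}
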
vendor/github.com/prometheus/procfs/proc_psi.go (4 changes; generated, vendored)

@@ -64,11 +64,11 @@ func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
 		return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
 	}
 
-	return parsePSIStats(resource, bytes.NewReader(data))
+	return parsePSIStats(bytes.NewReader(data))
 }
 
 // parsePSIStats parses the specified file for pressure stall information.
-func parsePSIStats(resource string, r io.Reader) (PSIStats, error) {
+func parsePSIStats(r io.Reader) (PSIStats, error) {
 	psiStats := PSIStats{}
 
 	scanner := bufio.NewScanner(r)

vendor/github.com/prometheus/procfs/proc_smaps.go (4 changes; generated, vendored)

@@ -135,12 +135,12 @@ func (s *ProcSMapsRollup) parseLine(line string) error {
 	}
 	vBytes := vKBytes * 1024
 
-	s.addValue(k, v, vKBytes, vBytes)
+	s.addValue(k, vBytes)
 
 	return nil
 }
 
-func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) {
+func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) {
 	switch k {
 	case "Rss":
 		s.Rss += vUintBytes

vendor/github.com/prometheus/procfs/stat.go (4 changes; generated, vendored)

@@ -187,6 +187,10 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
 		err error
 	)
 
+	// Increase default scanner buffer to handle very long `intr` lines.
+	buf := make([]byte, 0, 8*1024)
+	scanner.Buffer(buf, 1024*1024)
+
 	for scanner.Scan() {
 		line := scanner.Text()
 		parts := strings.Fields(scanner.Text())

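The same pattern helps any bufio.Scanner that can meet lines longer than the 64 KiB default token limit; a self-contained illustration:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	r := strings.NewReader("intr 0 1 2 3\n")
	scanner := bufio.NewScanner(r)
	// As in the diff above: start with an 8 KiB buffer and let the scanner
	// grow it up to 1 MiB instead of failing at bufio.MaxScanTokenSize.
	buf := make([]byte, 0, 8*1024)
	scanner.Buffer(buf, 1024*1024)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
}
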
vendor/github.com/prometheus/procfs/thread.go (6 changes; generated, vendored)

@@ -55,7 +55,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) {
 			continue
 		}
 
-		t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.real}})
+		t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}})
 	}
 
 	return t, nil

@@ -67,12 +67,12 @@ func (fs FS) Thread(pid, tid int) (Proc, error) {
 	if _, err := os.Stat(taskPath); err != nil {
 		return Proc{}, err
 	}
-	return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.real}}, nil
+	return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil
 }
 
 // Thread returns a process for a given TID of Proc.
 func (proc Proc) Thread(tid int) (Proc, error) {
-	tfs := FS{fsi.FS(proc.path("task")), proc.fs.real}
+	tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal}
 	if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil {
 		return Proc{}, err
 	}

vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go (46 changes; generated, vendored)

@@ -421,7 +421,7 @@ func addBucket(
 // receiving histogram, but a pointer to it is returned for convenience.
 //
 // The ideal value for maxEmptyBuckets depends on circumstances. The motivation
-// to set maxEmptyBuckets > 0 is the assumption that is is less overhead to
+// to set maxEmptyBuckets > 0 is the assumption that is less overhead to
 // represent very few empty buckets explicitly within one span than cutting the
 // one span into two to treat the empty buckets as a gap between the two spans,
 // both in terms of storage requirement as well as in terms of encoding and

@@ -615,10 +615,24 @@ func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64]
 // set to the zero threshold.
 func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] {
 	return &allFloatBucketIterator{
 		h: h,
-		negIter: h.NegativeReverseBucketIterator(),
-		posIter: h.PositiveBucketIterator(),
+		leftIter: h.NegativeReverseBucketIterator(),
+		rightIter: h.PositiveBucketIterator(),
 		state: -1,
+	}
+}
+
+// AllReverseBucketIterator returns a BucketIterator to iterate over all negative,
+// zero, and positive buckets in descending order (starting at the highest bucket
+// and going down). If the highest negative bucket or the lowest positive bucket
+// overlap with the zero bucket, their upper or lower boundary, respectively, is
+// set to the zero threshold.
+func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] {
+	return &allFloatBucketIterator{
+		h: h,
+		leftIter: h.PositiveReverseBucketIterator(),
+		rightIter: h.NegativeBucketIterator(),
+		state: -1,
 	}
 }

@@ -903,8 +917,8 @@ func (i *reverseFloatBucketIterator) Next() bool {
 }
 
 type allFloatBucketIterator struct {
 	h *FloatHistogram
-	negIter, posIter BucketIterator[float64]
+	leftIter, rightIter BucketIterator[float64]
 	// -1 means we are iterating negative buckets.
 	// 0 means it is time for the zero bucket.
 	// 1 means we are iterating positive buckets.

@@ -916,10 +930,13 @@ type allFloatBucketIterator struct {
 func (i *allFloatBucketIterator) Next() bool {
 	switch i.state {
 	case -1:
-		if i.negIter.Next() {
-			i.currBucket = i.negIter.At()
-			if i.currBucket.Upper > -i.h.ZeroThreshold {
+		if i.leftIter.Next() {
+			i.currBucket = i.leftIter.At()
+			switch {
+			case i.currBucket.Upper < 0 && i.currBucket.Upper > -i.h.ZeroThreshold:
 				i.currBucket.Upper = -i.h.ZeroThreshold
+			case i.currBucket.Lower > 0 && i.currBucket.Lower < i.h.ZeroThreshold:
+				i.currBucket.Lower = i.h.ZeroThreshold
 			}
 			return true
 		}

@@ -940,10 +957,13 @@ func (i *allFloatBucketIterator) Next() bool {
 		}
 		return i.Next()
 	case 1:
-		if i.posIter.Next() {
-			i.currBucket = i.posIter.At()
-			if i.currBucket.Lower < i.h.ZeroThreshold {
+		if i.rightIter.Next() {
+			i.currBucket = i.rightIter.At()
+			switch {
+			case i.currBucket.Lower > 0 && i.currBucket.Lower < i.h.ZeroThreshold:
 				i.currBucket.Lower = i.h.ZeroThreshold
+			case i.currBucket.Upper < 0 && i.currBucket.Upper > -i.h.ZeroThreshold:
+				i.currBucket.Upper = -i.h.ZeroThreshold
 			}
 			return true
 		}

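A hedged usage sketch of the new iterator; fh stands for some *histogram.FloatHistogram, and fmt is assumed imported:

// Walk every bucket from the highest downwards; boundaries overlapping
// the zero bucket are clamped to the zero threshold by the iterator.
it := fh.AllReverseBucketIterator()
for it.Next() {
	b := it.At()
	fmt.Println(b.Lower, b.Upper, b.Count)
}
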
vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go (generated, vendored)

@@ -273,13 +273,27 @@ func (ls Labels) Copy() Labels {
 // Get returns the value for the label with the given name.
 // Returns an empty string if the label doesn't exist.
 func (ls Labels) Get(name string) string {
+	if name == "" { // Avoid crash in loop if someone asks for "".
+		return "" // Prometheus does not store blank label names.
+	}
 	for i := 0; i < len(ls.data); {
-		var lName, lValue string
-		lName, i = decodeString(ls.data, i)
-		lValue, i = decodeString(ls.data, i)
-		if lName == name {
-			return lValue
+		var size int
+		size, i = decodeSize(ls.data, i)
+		if ls.data[i] == name[0] {
+			lName := ls.data[i : i+size]
+			i += size
+			if lName == name {
+				lValue, _ := decodeString(ls.data, i)
+				return lValue
+			}
+		} else {
+			if ls.data[i] > name[0] { // Stop looking if we've gone past.
+				break
+			}
+			i += size
 		}
+		size, i = decodeSize(ls.data, i)
+		i += size
 	}
 	return ""
 }

@@ -422,37 +436,49 @@ func FromStrings(ss ...string) Labels {
 
 // Compare compares the two label sets.
 // The result will be 0 if a==b, <0 if a < b, and >0 if a > b.
-// TODO: replace with Less function - Compare is never needed.
-// TODO: just compare the underlying strings when we don't need alphanumeric sorting.
 func Compare(a, b Labels) int {
-	l := len(a.data)
-	if len(b.data) < l {
-		l = len(b.data)
+	// Find the first byte in the string where a and b differ.
+	shorter, longer := a.data, b.data
+	if len(b.data) < len(a.data) {
+		shorter, longer = b.data, a.data
+	}
+	i := 0
+	// First, go 8 bytes at a time. Data strings are expected to be 8-byte aligned.
+	sp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&shorter)).Data)
+	lp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&longer)).Data)
+	for ; i < len(shorter)-8; i += 8 {
+		if *(*uint64)(unsafe.Add(sp, i)) != *(*uint64)(unsafe.Add(lp, i)) {
+			break
+		}
+	}
+	// Now go 1 byte at a time.
+	for ; i < len(shorter); i++ {
+		if shorter[i] != longer[i] {
+			break
+		}
+	}
+	if i == len(shorter) {
+		// One Labels was a prefix of the other; the set with fewer labels compares lower.
+		return len(a.data) - len(b.data)
 	}
 
-	ia, ib := 0, 0
-	for ia < l {
-		var aName, bName string
-		aName, ia = decodeString(a.data, ia)
-		bName, ib = decodeString(b.data, ib)
-		if aName != bName {
-			if aName < bName {
-				return -1
-			}
-			return 1
-		}
-		var aValue, bValue string
-		aValue, ia = decodeString(a.data, ia)
-		bValue, ib = decodeString(b.data, ib)
-		if aValue != bValue {
-			if aValue < bValue {
-				return -1
-			}
-			return 1
+	// Now we know that there is some difference before the end of a and b.
+	// Go back through the fields and find which field that difference is in.
+	firstCharDifferent := i
+	for i = 0; ; {
+		size, nextI := decodeSize(a.data, i)
+		if nextI+size > firstCharDifferent {
+			break
 		}
+		i = nextI + size
 	}
-	// If all labels so far were in common, the set with fewer labels comes first.
-	return len(a.data) - len(b.data)
+	// Difference is inside this entry.
+	aStr, _ := decodeString(a.data, i)
+	bStr, _ := decodeString(b.data, i)
+	if aStr < bStr {
+		return -1
+	}
+	return +1
 }
 
 // Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed.

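The rewritten Compare first locates the first differing byte of the two packed data strings (8 bytes at a time, then byte by byte) and only then decodes the entry containing that byte. A simplified, safe sketch of the locate step, without the unsafe word-at-a-time fast path:

package main

import "fmt"

// firstDiff reports the index of the first differing byte of two strings,
// or -1 if they are equal. This mirrors the first stage of Compare above.
func firstDiff(a, b string) int {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	for i := 0; i < n; i++ {
		if a[i] != b[i] {
			return i
		}
	}
	if len(a) != len(b) {
		return n // one string is a prefix of the other
	}
	return -1
}

func main() {
	fmt.Println(firstDiff("jobXa", "jobYb")) // 3
	fmt.Println(firstDiff("job", "jobber"))  // 3 (prefix case)
	fmt.Println(firstDiff("job", "job"))     // -1
}
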
vendor/github.com/prometheus/prometheus/model/labels/regexp.go (17 changes; generated, vendored)

@@ -25,9 +25,16 @@ type FastRegexMatcher struct {
 	prefix string
 	suffix string
 	contains string
+
+	// shortcut for literals
+	literal bool
+	value string
 }
 
 func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
+	if isLiteral(v) {
+		return &FastRegexMatcher{literal: true, value: v}, nil
+	}
 	re, err := regexp.Compile("^(?:" + v + ")$")
 	if err != nil {
 		return nil, err

@@ -50,6 +57,9 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
 }
 
 func (m *FastRegexMatcher) MatchString(s string) bool {
+	if m.literal {
+		return s == m.value
+	}
 	if m.prefix != "" && !strings.HasPrefix(s, m.prefix) {
 		return false
 	}

@@ -63,9 +73,16 @@ func (m *FastRegexMatcher) MatchString(s string) bool {
 }
 
 func (m *FastRegexMatcher) GetRegexString() string {
+	if m.literal {
+		return m.value
+	}
 	return m.re.String()
 }
 
+func isLiteral(re string) bool {
+	return regexp.QuoteMeta(re) == re
+}
+
 // optimizeConcatRegex returns literal prefix/suffix text that can be safely
 // checked against the label value before running the regexp matcher.
 func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) {

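The literal shortcut is cheap to detect, as the new isLiteral above shows: a pattern is a pure literal exactly when regexp.QuoteMeta leaves it unchanged. A self-contained usage sketch against the public API:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// "up" is a pure literal, so the matcher skips regexp compilation
	// entirely and falls back to plain string equality.
	m, err := labels.NewFastRegexMatcher("up")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.MatchString("up"))  // true
	fmt.Println(m.MatchString("upx")) // false: full-string equality, no prefix match
}
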
vendor/github.com/prometheus/prometheus/model/relabel/relabel.go (6 changes; generated, vendored)

@@ -202,10 +202,12 @@ func (re Regexp) String() string {
 	return str[4 : len(str)-2]
 }
 
-// Process returns a relabeled copy of the given label set. The relabel configurations
+// Process returns a relabeled version of the given label set. The relabel configurations
 // are applied in order of input.
+// There are circumstances where Process will modify the input label.
+// If you want to avoid issues with the input label set being modified, at the cost of
+// higher memory usage, you can use lbls.Copy().
 // If a label set is dropped, EmptyLabels and false is returned.
-// May return the input labelSet modified.
 func Process(lbls labels.Labels, cfgs ...*Config) (ret labels.Labels, keep bool) {
 	lb := labels.NewBuilder(lbls)
 	if !ProcessBuilder(lb, cfgs...) {

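In line with the clarified contract, a caller that needs the input to survive can pay for a copy up front; a minimal hedged sketch (lbls, cfgs, and use are assumed to exist in the caller):

// Process may mutate its input, so hand it a copy when the original
// label set must stay intact, at the cost of an extra allocation.
relabeled, keep := relabel.Process(lbls.Copy(), cfgs...)
if keep {
	use(relabeled) // hypothetical consumer
}
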
vendor/github.com/prometheus/prometheus/model/textparse/interface.go (generated, vendored; 4 changed lines)

@@ -59,7 +59,9 @@ type Parser interface {
 	Metric(l *labels.Labels) string
 
 	// Exemplar writes the exemplar of the current sample into the passed
-	// exemplar. It returns if an exemplar exists or not.
+	// exemplar. It can be called repeatedly to retrieve multiple exemplars
+	// for the same sample. It returns false once all exemplars are
+	// retrieved (including the case where no exemplars exist at all).
 	Exemplar(l *exemplar.Exemplar) bool
 
 	// Next advances the parser to the next sample. It returns false if no
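Under the widened contract, a consumer drains exemplars in a loop until Exemplar reports false, which is exactly how the scrape loop is updated later in this diff. A sketch under that assumption (drainExemplars is our name):

    import (
    	"github.com/prometheus/prometheus/model/exemplar"
    	"github.com/prometheus/prometheus/model/textparse"
    )

    // drainExemplars collects every exemplar attached to the current sample.
    // For samples without exemplars the first call already returns false,
    // so the loop always terminates.
    func drainExemplars(p textparse.Parser) []exemplar.Exemplar {
    	var out []exemplar.Exemplar
    	var e exemplar.Exemplar
    	for p.Exemplar(&e) {
    		out = append(out, e)
    		e = exemplar.Exemplar{} // reset before the next read
    	}
    	return out
    }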
vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go (generated, vendored; 8 changed lines)

@@ -174,8 +174,10 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string {
 	return s
 }
 
-// Exemplar writes the exemplar of the current sample into the passed
-// exemplar. It returns the whether an exemplar exists.
+// Exemplar writes the exemplar of the current sample into the passed exemplar.
+// It returns whether an exemplar exists. As OpenMetrics only ever has one
+// exemplar per sample, every call after the first (for the same sample) will
+// always return false.
 func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool {
 	if len(p.exemplar) == 0 {
 		return false
@@ -204,6 +206,8 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool {
 	p.builder.Sort()
 	e.Labels = p.builder.Labels()
 
+	// Wipe exemplar so that future calls return false.
+	p.exemplar = p.exemplar[:0]
 	return true
 }
 
vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go (generated, vendored; 20 changed lines)

@@ -54,7 +54,7 @@ type ProtobufParser struct {
 	// quantiles/buckets.
 	fieldPos    int
 	fieldsDone  bool // true if no more fields of a Summary or (legacy) Histogram to be processed.
-	redoClassic bool // true after parsing a native histogram if we need to parse it again as a classit histogram.
+	redoClassic bool // true after parsing a native histogram if we need to parse it again as a classic histogram.
 
 	// state is marked by the entry we are processing. EntryInvalid implies
 	// that we have to decode the next MetricFamily.
@@ -105,7 +105,7 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
 			v = float64(s.GetSampleCount())
 		case -1:
 			v = s.GetSampleSum()
-			// Need to detect a summaries without quantile here.
+			// Need to detect summaries without quantile here.
 			if len(s.GetQuantile()) == 0 {
 				p.fieldsDone = true
 			}
@@ -411,6 +411,14 @@ func (p *ProtobufParser) Next() (Entry, error) {
 			p.metricPos++
 			p.fieldPos = -2
 			p.fieldsDone = false
+			// If this is a metric family containing native
+			// histograms, we have to switch back to native
+			// histograms after parsing a classic histogram.
+			if p.state == EntrySeries &&
+				(t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) &&
+				isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) {
+				p.state = EntryHistogram
+			}
 		}
 		if p.metricPos >= len(p.mf.GetMetric()) {
 			p.state = EntryInvalid
@@ -556,8 +564,10 @@ func formatOpenMetricsFloat(f float64) string {
 // deciding if a histogram should be ingested as a conventional one or a native
 // one.
 func isNativeHistogram(h *dto.Histogram) bool {
-	return len(h.GetNegativeDelta()) > 0 ||
-		len(h.GetPositiveDelta()) > 0 ||
+	return h.GetZeroThreshold() > 0 ||
 		h.GetZeroCount() > 0 ||
-		h.GetZeroThreshold() > 0
+		len(h.GetNegativeDelta()) > 0 ||
+		len(h.GetPositiveDelta()) > 0 ||
+		len(h.GetNegativeCount()) > 0 ||
+		len(h.GetPositiveCount()) > 0
 }
vendor/github.com/prometheus/prometheus/scrape/scrape.go (generated, vendored; 8 changed lines)

@@ -23,7 +23,6 @@ import (
 	"math"
 	"net/http"
 	"reflect"
-	"sort"
 	"strconv"
 	"sync"
 	"time"
@@ -35,6 +34,7 @@ import (
 	config_util "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/version"
+	"golang.org/x/exp/slices"
 
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -720,8 +720,8 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re
 }
 
 func resolveConflictingExposedLabels(lb *labels.Builder, conflictingExposedLabels []labels.Label) {
-	sort.SliceStable(conflictingExposedLabels, func(i, j int) bool {
-		return len(conflictingExposedLabels[i].Name) < len(conflictingExposedLabels[j].Name)
+	slices.SortStableFunc(conflictingExposedLabels, func(a, b labels.Label) bool {
+		return len(a.Name) < len(b.Name)
 	})
 
 	for _, l := range conflictingExposedLabels {
@@ -1685,7 +1685,7 @@ loop:
 		// number of samples remaining after relabeling.
 		added++
 
-		if hasExemplar := p.Exemplar(&e); hasExemplar {
+		for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) {
 			if !e.HasTs {
 				e.Ts = t
 			}
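This file shows the mechanical migration repeated throughout the update: sort.Slice with an index-based closure becomes slices.SortFunc or slices.SortStableFunc, whose comparator receives the elements themselves (at this vendored version of golang.org/x/exp/slices the comparator returns a less-than bool). A self-contained sketch of the pattern:

    import "golang.org/x/exp/slices"

    type label struct{ Name, Value string }

    func sortByNameLength(ls []label) {
    	// Before: sort.SliceStable(ls, func(i, j int) bool { return len(ls[i].Name) < len(ls[j].Name) })
    	// After: no index bookkeeping, and the closure no longer captures ls.
    	slices.SortStableFunc(ls, func(a, b label) bool {
    		return len(a.Name) < len(b.Name)
    	})
    }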
vendor/github.com/prometheus/prometheus/scrape/target.go (generated, vendored; 6 changed lines)

@@ -154,14 +154,14 @@ func (t *Target) hash() uint64 {
 }
 
 // offset returns the time until the next scrape cycle for the target.
-// It includes the global server jitterSeed for scrapes from multiple Prometheus to try to be at different times.
-func (t *Target) offset(interval time.Duration, jitterSeed uint64) time.Duration {
+// It includes the global server offsetSeed for scrapes from multiple Prometheus to try to be at different times.
+func (t *Target) offset(interval time.Duration, offsetSeed uint64) time.Duration {
 	now := time.Now().UnixNano()
 
 	// Base is a pinned to absolute time, no matter how often offset is called.
 	var (
 		base   = int64(interval) - now%int64(interval)
-		offset = (t.hash() ^ jitterSeed) % uint64(interval)
+		offset = (t.hash() ^ offsetSeed) % uint64(interval)
 		next   = base + int64(offset)
 	)
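The arithmetic in offset spreads targets deterministically across one scrape interval: base fills the time up to the next interval boundary, and the hash^offsetSeed term adds a per-target, per-server shift. A worked sketch of the same arithmetic (the function name and the final wrap-around step are ours; the hunk above ends before the function's tail):

    // offsetFor mirrors the computation in Target.offset. now and interval
    // are both in nanoseconds.
    func offsetFor(now, interval int64, hash, offsetSeed uint64) int64 {
    	base := interval - now%interval
    	offset := (hash ^ offsetSeed) % uint64(interval)
    	next := base + int64(offset)
    	if next > interval { // keep the result within a single interval
    		next -= interval
    	}
    	return next
    }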
vendor/github.com/prometheus/prometheus/storage/remote/codec.go (generated, vendored; 11 changed lines)

@@ -26,6 +26,7 @@ import (
 	"github.com/gogo/protobuf/proto"
 	"github.com/golang/snappy"
 	"github.com/prometheus/common/model"
+	"golang.org/x/exp/slices"
 
 	"github.com/prometheus/prometheus/model/exemplar"
 	"github.com/prometheus/prometheus/model/histogram"
@@ -178,7 +179,9 @@ func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet
 	}
 
 	if sortSeries {
-		sort.Sort(byLabel(series))
+		slices.SortFunc(series, func(a, b storage.Series) bool {
+			return labels.Compare(a.Labels(), b.Labels()) < 0
+		})
 	}
 	return &concreteSeriesSet{
 		series: series,
@@ -313,12 +316,6 @@ func MergeLabels(primary, secondary []prompb.Label) []prompb.Label {
 	return result
 }
 
-type byLabel []storage.Series
-
-func (a byLabel) Len() int           { return len(a) }
-func (a byLabel) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-func (a byLabel) Less(i, j int) bool { return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 }
-
 // errSeriesSet implements storage.SeriesSet, just returning an error.
 type errSeriesSet struct {
 	err error
vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go (generated, vendored; 6 changed lines)

@@ -16,12 +16,12 @@ package remote
 import (
 	"context"
 	"net/http"
-	"sort"
 	"sync"
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
+	"golang.org/x/exp/slices"
 
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/model/labels"
@@ -92,8 +92,8 @@ func (h *readHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 				Value: value,
 			})
 		}
-		sort.Slice(sortedExternalLabels, func(i, j int) bool {
-			return sortedExternalLabels[i].Name < sortedExternalLabels[j].Name
+		slices.SortFunc(sortedExternalLabels, func(a, b prompb.Label) bool {
+			return a.Name < b.Name
 		})
 
 		responseType, err := NegotiateResponseType(req.AcceptedResponseTypes)
vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go (generated, vendored; 28 changed lines)

@@ -22,6 +22,8 @@ import (
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 
+	"github.com/prometheus/client_golang/prometheus"
+
 	"github.com/prometheus/prometheus/model/exemplar"
 	"github.com/prometheus/prometheus/prompb"
 	"github.com/prometheus/prometheus/storage"
@@ -30,15 +32,28 @@ import (
 type writeHandler struct {
 	logger     log.Logger
 	appendable storage.Appendable
+
+	samplesWithInvalidLabelsTotal prometheus.Counter
 }
 
 // NewWriteHandler creates a http.Handler that accepts remote write requests and
 // writes them to the provided appendable.
-func NewWriteHandler(logger log.Logger, appendable storage.Appendable) http.Handler {
-	return &writeHandler{
+func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable) http.Handler {
+	h := &writeHandler{
 		logger:     logger,
 		appendable: appendable,
+
+		samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{
+			Namespace: "prometheus",
+			Subsystem: "api",
+			Name:      "remote_write_invalid_labels_samples_total",
+			Help:      "The total number of remote write samples which contains invalid labels.",
+		}),
 	}
+	if reg != nil {
+		reg.MustRegister(h.samplesWithInvalidLabelsTotal)
+	}
+	return h
 }
 
 func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@@ -85,6 +100,7 @@ func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar,
 
 func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) {
 	outOfOrderExemplarErrs := 0
+	samplesWithInvalidLabels := 0
 
 	app := h.appendable.Appender(ctx)
 	defer func() {
@@ -98,6 +114,11 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
 	var exemplarErr error
 	for _, ts := range req.Timeseries {
 		labels := labelProtosToLabels(ts.Labels)
+		if !labels.IsValid() {
+			level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", labels.String())
+			samplesWithInvalidLabels++
+			continue
+		}
 		for _, s := range ts.Samples {
 			_, err = app.Append(0, labels, s.Timestamp, s.Value)
 			if err != nil {
@@ -150,6 +171,9 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
 	if outOfOrderExemplarErrs > 0 {
 		_ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs)
 	}
+	if samplesWithInvalidLabels > 0 {
+		h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels))
+	}
 
 	return nil
 }
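NewWriteHandler now takes a prometheus.Registerer for the new invalid-labels counter, and the nil check above makes registration optional. A hedged usage sketch (the mount path and wiring are illustrative, not from this diff):

    import (
    	"net/http"

    	"github.com/go-kit/log"
    	"github.com/prometheus/client_golang/prometheus"
    	"github.com/prometheus/prometheus/storage"
    	"github.com/prometheus/prometheus/storage/remote"
    )

    // mountRemoteWrite wires the handler; passing prometheus.DefaultRegisterer
    // exposes prometheus_api_remote_write_invalid_labels_samples_total, while
    // passing nil skips registration entirely.
    func mountRemoteWrite(mux *http.ServeMux, app storage.Appendable) {
    	h := remote.NewWriteHandler(log.NewNopLogger(), prometheus.DefaultRegisterer, app)
    	mux.Handle("/api/v1/write", h)
    }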
vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go (generated, vendored; 20 changed lines)

@@ -948,12 +948,22 @@ func (cdm *ChunkDiskMapper) Truncate(fileNo uint32) error {
 	if len(chkFileIndices) == len(removedFiles) {
 		// All files were deleted. Reset the current sequence.
 		cdm.evtlPosMtx.Lock()
-		if err == nil {
-			cdm.evtlPos.setSeq(0)
-		} else {
-			// In case of error, set it to the last file number on the disk that was not deleted.
-			cdm.evtlPos.setSeq(uint64(pendingDeletes[len(pendingDeletes)-1]))
+		// We can safely reset the sequence only if the write queue is empty. If it's not empty,
+		// then there may be a job in the queue that will create a new segment file with an ID
+		// generated before the sequence reset.
+		//
+		// The queueIsEmpty() function must be called while holding the cdm.evtlPosMtx to avoid
+		// a race condition with WriteChunk().
+		if cdm.writeQueue == nil || cdm.writeQueue.queueIsEmpty() {
+			if err == nil {
+				cdm.evtlPos.setSeq(0)
+			} else {
+				// In case of error, set it to the last file number on the disk that was not deleted.
+				cdm.evtlPos.setSeq(uint64(pendingDeletes[len(pendingDeletes)-1]))
+			}
 		}
 
 		cdm.evtlPosMtx.Unlock()
 	}
vendor/github.com/prometheus/prometheus/tsdb/compact.go (generated, vendored; 10 changed lines)

@@ -20,7 +20,6 @@ import (
 	"io"
 	"os"
 	"path/filepath"
-	"sort"
 	"time"
 
 	"github.com/go-kit/log"
@@ -28,6 +27,7 @@ import (
 	"github.com/oklog/ulid"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
+	"golang.org/x/exp/slices"
 
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
@@ -200,8 +200,8 @@ func (c *LeveledCompactor) Plan(dir string) ([]string, error) {
 }
 
 func (c *LeveledCompactor) plan(dms []dirMeta) ([]string, error) {
-	sort.Slice(dms, func(i, j int) bool {
-		return dms[i].meta.MinTime < dms[j].meta.MinTime
+	slices.SortFunc(dms, func(a, b dirMeta) bool {
+		return a.meta.MinTime < b.meta.MinTime
 	})
 
 	res := c.selectOverlappingDirs(dms)
@@ -380,8 +380,8 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta {
 	for s := range sources {
 		res.Compaction.Sources = append(res.Compaction.Sources, s)
 	}
-	sort.Slice(res.Compaction.Sources, func(i, j int) bool {
-		return res.Compaction.Sources[i].Compare(res.Compaction.Sources[j]) < 0
+	slices.SortFunc(res.Compaction.Sources, func(a, b ulid.ULID) bool {
+		return a.Compare(b) < 0
 	})
 
 	res.MinTime = mint
vendor/github.com/prometheus/prometheus/tsdb/db.go (generated, vendored; 20 changed lines)

@@ -22,7 +22,6 @@ import (
 	"math"
 	"os"
 	"path/filepath"
-	"sort"
 	"strconv"
 	"strings"
 	"sync"
@@ -34,6 +33,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	"go.uber.org/atomic"
+	"golang.org/x/exp/slices"
 	"golang.org/x/sync/errgroup"
 
 	"github.com/prometheus/prometheus/config"
@@ -77,8 +77,8 @@ func DefaultOptions() *Options {
 		MaxBlockDuration:           DefaultBlockDuration,
 		NoLockfile:                 false,
 		AllowOverlappingCompaction: true,
-		WALCompression:             false,
 		SamplesPerChunk:            DefaultSamplesPerChunk,
+		WALCompression:             wlog.CompressionNone,
 		StripeSize:                 DefaultStripeSize,
 		HeadChunksWriteBufferSize:  chunks.DefaultWriteBufferSize,
 		IsolationDisabled:          defaultIsolationDisabled,
@@ -123,8 +123,8 @@ type Options struct {
 	// For Prometheus, this will always be true.
 	AllowOverlappingCompaction bool
 
-	// WALCompression will turn on Snappy compression for records on the WAL.
-	WALCompression bool
+	// WALCompression configures the compression type to use on records in the WAL.
+	WALCompression wlog.CompressionType
 
 	// Maximum number of CPUs that can simultaneously processes WAL replay.
 	// If it is <=0, then GOMAXPROCS is used.
@@ -579,8 +579,8 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) {
 		return nil, nil
 	}
 
-	sort.Slice(loadable, func(i, j int) bool {
-		return loadable[i].Meta().MinTime < loadable[j].Meta().MinTime
+	slices.SortFunc(loadable, func(a, b *Block) bool {
+		return a.Meta().MinTime < b.Meta().MinTime
 	})
 
 	blockMetas := make([]BlockMeta, 0, len(loadable))
@@ -1445,8 +1445,8 @@ func (db *DB) reloadBlocks() (err error) {
 	}
 	db.metrics.blocksBytes.Set(float64(blocksSize))
 
-	sort.Slice(toLoad, func(i, j int) bool {
-		return toLoad[i].Meta().MinTime < toLoad[j].Meta().MinTime
+	slices.SortFunc(toLoad, func(a, b *Block) bool {
+		return a.Meta().MinTime < b.Meta().MinTime
 	})
 
 	// Swap new blocks first for subsequently created readers to be seen.
@@ -1515,8 +1515,8 @@ func deletableBlocks(db *DB, blocks []*Block) map[ulid.ULID]struct{} {
 
 	// Sort the blocks by time - newest to oldest (largest to smallest timestamp).
 	// This ensures that the retentions will remove the oldest blocks.
-	sort.Slice(blocks, func(i, j int) bool {
-		return blocks[i].Meta().MaxTime > blocks[j].Meta().MaxTime
+	slices.SortFunc(blocks, func(a, b *Block) bool {
+		return a.Meta().MaxTime > b.Meta().MaxTime
 	})
 
 	for _, block := range blocks {
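Options.WALCompression changes from a bool to the wlog.CompressionType enum, so callers now name the algorithm instead of toggling Snappy implicitly. A migration sketch; wlog.CompressionNone appears in this diff, while the Snappy constant name is an assumption based on the same type:

    import (
    	"github.com/prometheus/prometheus/tsdb"
    	"github.com/prometheus/prometheus/tsdb/wlog"
    )

    // walOptions maps the old boolean intent onto the new typed field.
    func walOptions(compress bool) *tsdb.Options {
    	opts := tsdb.DefaultOptions()
    	opts.WALCompression = wlog.CompressionNone
    	if compress {
    		opts.WALCompression = wlog.CompressionSnappy // assumed constant name
    	}
    	return opts
    }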
vendor/github.com/prometheus/prometheus/tsdb/exemplar.go (generated, vendored; 10 changed lines)

@@ -15,11 +15,11 @@ package tsdb
 
 import (
 	"context"
-	"sort"
 	"sync"
 	"unicode/utf8"
 
 	"github.com/prometheus/client_golang/prometheus"
+	"golang.org/x/exp/slices"
 
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/model/exemplar"
@@ -185,8 +185,8 @@ func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*label
 		}
 	}
 
-	sort.Slice(ret, func(i, j int) bool {
-		return labels.Compare(ret[i].SeriesLabels, ret[j].SeriesLabels) < 0
+	slices.SortFunc(ret, func(a, b exemplar.QueryResult) bool {
+		return labels.Compare(a.SeriesLabels, b.SeriesLabels) < 0
 	})
 
 	return ret, nil
@@ -365,8 +365,8 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp
 	if prev := ce.exemplars[ce.nextIndex]; prev == nil {
 		ce.exemplars[ce.nextIndex] = &circularBufferEntry{}
 	} else {
-		// There exists exemplar already on this ce.nextIndex entry, drop it, to make place
-		// for others.
+		// There exists an exemplar already on this ce.nextIndex entry,
+		// drop it, to make place for others.
 		var buf [1024]byte
 		prevLabels := prev.ref.seriesLabels.Bytes(buf[:])
 		if prev.next == noExemplar {
vendor/github.com/prometheus/prometheus/tsdb/head.go (generated, vendored; 2 changed lines)

@@ -977,7 +977,7 @@ func (h *Head) DisableNativeHistograms() {
 	h.opts.EnableNativeHistograms.Store(false)
 }
 
-// PostingsCardinalityStats returns top 10 highest cardinality stats By label and value names.
+// PostingsCardinalityStats returns highest cardinality stats by label and value names.
 func (h *Head) PostingsCardinalityStats(statsByLabelName string, limit int) *index.PostingsStats {
 	h.cardinalityMutex.Lock()
 	defer h.cardinalityMutex.Unlock()
vendor/github.com/prometheus/prometheus/tsdb/head_append.go (generated, vendored; 53 changed lines)

@@ -881,9 +881,13 @@ func (a *headAppender) Commit() (err error) {
 		oooMmapMarkers map[chunks.HeadSeriesRef]chunks.ChunkDiskMapperRef
 		oooRecords     [][]byte
 		oooCapMax      = a.head.opts.OutOfOrderCapMax.Load()
-		chunkRange     = a.head.chunkRange.Load()
 		series         *memSeries
-		enc            record.Encoder
+		appendChunkOpts = chunkOpts{
+			chunkDiskMapper: a.head.chunkDiskMapper,
+			chunkRange:      a.head.chunkRange.Load(),
+			samplesPerChunk: a.head.opts.SamplesPerChunk,
+		}
+		enc record.Encoder
 	)
 	defer func() {
 		for i := range oooRecords {
@@ -987,7 +991,7 @@ func (a *headAppender) Commit() (err error) {
 				samplesAppended--
 			}
 		default:
-			ok, chunkCreated = series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper, chunkRange, a.head.opts.SamplesPerChunk)
+			ok, chunkCreated = series.append(s.T, s.V, a.appendID, appendChunkOpts)
 			if ok {
 				if s.T < inOrderMint {
 					inOrderMint = s.T
@@ -1016,7 +1020,7 @@ func (a *headAppender) Commit() (err error) {
 	for i, s := range a.histograms {
 		series = a.histogramSeries[i]
 		series.Lock()
-		ok, chunkCreated := series.appendHistogram(s.T, s.H, a.appendID, a.head.chunkDiskMapper, chunkRange, a.head.opts.SamplesPerChunk)
+		ok, chunkCreated := series.appendHistogram(s.T, s.H, a.appendID, appendChunkOpts)
 		series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
 		series.pendingCommit = false
 		series.Unlock()
@@ -1042,7 +1046,7 @@ func (a *headAppender) Commit() (err error) {
 	for i, s := range a.floatHistograms {
 		series = a.floatHistogramSeries[i]
 		series.Lock()
-		ok, chunkCreated := series.appendFloatHistogram(s.T, s.FH, a.appendID, a.head.chunkDiskMapper, chunkRange, a.head.opts.SamplesPerChunk)
+		ok, chunkCreated := series.appendFloatHistogram(s.T, s.FH, a.appendID, appendChunkOpts)
 		series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
 		series.pendingCommit = false
 		series.Unlock()
@@ -1118,12 +1122,19 @@ func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDisk
 	return ok, chunkCreated, mmapRef
 }
 
+// chunkOpts are chunk-level options that are passed when appending to a memSeries.
+type chunkOpts struct {
+	chunkDiskMapper *chunks.ChunkDiskMapper
+	chunkRange      int64
+	samplesPerChunk int
+}
+
 // append adds the sample (t, v) to the series. The caller also has to provide
 // the appendID for isolation. (The appendID can be zero, which results in no
 // isolation for this append.)
 // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
-func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int) (sampleInOrder, chunkCreated bool) {
-	c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, chunkDiskMapper, chunkRange, samplesPerChunk)
+func (s *memSeries) append(t int64, v float64, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) {
+	c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, o)
 	if !sampleInOrder {
 		return sampleInOrder, chunkCreated
 	}
@@ -1144,7 +1155,7 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper
 
 // appendHistogram adds the histogram.
 // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
-func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int) (sampleInOrder, chunkCreated bool) {
+func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) {
 	// Head controls the execution of recoding, so that we own the proper
 	// chunk reference afterwards. We check for Appendable from appender before
 	// appendPreprocessor because in case it ends up creating a new chunk,
@@ -1157,7 +1168,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
 		pMergedSpans, nMergedSpans      []histogram.Span
 		okToAppend, counterReset, gauge bool
 	)
-	c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange, samplesPerChunk)
+	c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, o)
 	if !sampleInOrder {
 		return sampleInOrder, chunkCreated
 	}
@@ -1193,7 +1204,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
 	// - okToAppend and no inserts → Chunk is ready to support our histogram.
 	switch {
 	case !okToAppend || counterReset:
-		c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange)
+		c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, o.chunkDiskMapper, o.chunkRange)
 		chunkCreated = true
 	case len(pForwardInserts) > 0 || len(nForwardInserts) > 0:
 		// New buckets have appeared. We need to recode all
@@ -1238,7 +1249,7 @@ func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID ui
 
 // appendFloatHistogram adds the float histogram.
 // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
-func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int) (sampleInOrder, chunkCreated bool) {
+func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram, appendID uint64, o chunkOpts) (sampleInOrder, chunkCreated bool) {
 	// Head controls the execution of recoding, so that we own the proper
 	// chunk reference afterwards. We check for Appendable from appender before
 	// appendPreprocessor because in case it ends up creating a new chunk,
@@ -1251,7 +1262,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
 		pMergedSpans, nMergedSpans      []histogram.Span
 		okToAppend, counterReset, gauge bool
 	)
-	c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange, samplesPerChunk)
+	c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncFloatHistogram, o)
 	if !sampleInOrder {
 		return sampleInOrder, chunkCreated
 	}
@@ -1287,7 +1298,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
 	// - okToAppend and no inserts → Chunk is ready to support our histogram.
 	switch {
 	case !okToAppend || counterReset:
-		c = s.cutNewHeadChunk(t, chunkenc.EncFloatHistogram, chunkDiskMapper, chunkRange)
+		c = s.cutNewHeadChunk(t, chunkenc.EncFloatHistogram, o.chunkDiskMapper, o.chunkRange)
 		chunkCreated = true
 	case len(pForwardInserts) > 0 || len(nForwardInserts) > 0:
 		// New buckets have appeared. We need to recode all
@@ -1333,9 +1344,7 @@ func (s *memSeries) appendFloatHistogram(t int64, fh *histogram.FloatHistogram,
 // appendPreprocessor takes care of cutting new chunks and m-mapping old chunks.
 // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
 // This should be called only when appending data.
-func (s *memSeries) appendPreprocessor(
-	t int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64, samplesPerChunk int,
-) (c *memChunk, sampleInOrder, chunkCreated bool) {
+func (s *memSeries) appendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts) (c *memChunk, sampleInOrder, chunkCreated bool) {
 	c = s.head()
 
 	if c == nil {
@@ -1344,7 +1353,7 @@ func (s *memSeries) appendPreprocessor(
 			return c, false, false
 		}
 		// There is no head chunk in this series yet, create the first chunk for the sample.
-		c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange)
+		c = s.cutNewHeadChunk(t, e, o.chunkDiskMapper, o.chunkRange)
 		chunkCreated = true
 	}
 
@@ -1356,7 +1365,7 @@ func (s *memSeries) appendPreprocessor(
 	if c.chunk.Encoding() != e {
 		// The chunk encoding expected by this append is different than the head chunk's
 		// encoding. So we cut a new chunk with the expected encoding.
-		c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange)
+		c = s.cutNewHeadChunk(t, e, o.chunkDiskMapper, o.chunkRange)
 		chunkCreated = true
 	}
 
@@ -1365,14 +1374,14 @@ func (s *memSeries) appendPreprocessor(
 		// It could be the new chunk created after reading the chunk snapshot,
 		// hence we fix the minTime of the chunk here.
 		c.minTime = t
-		s.nextAt = rangeForTimestamp(c.minTime, chunkRange)
+		s.nextAt = rangeForTimestamp(c.minTime, o.chunkRange)
 	}
 
 	// If we reach 25% of a chunk's desired sample count, predict an end time
 	// for this chunk that will try to make samples equally distributed within
 	// the remaining chunks in the current chunk range.
 	// At latest it must happen at the timestamp set when the chunk was cut.
-	if numSamples == samplesPerChunk/4 {
+	if numSamples == o.samplesPerChunk/4 {
 		s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, s.nextAt)
 	}
 	// If numSamples > samplesPerChunk*2 then our previous prediction was invalid,
@@ -1380,8 +1389,8 @@ func (s *memSeries) appendPreprocessor(
 	// Since we assume that the rate is higher, we're being conservative and cutting at 2*samplesPerChunk
 	// as we expect more chunks to come.
 	// Note that next chunk will have its nextAt recalculated for the new rate.
-	if t >= s.nextAt || numSamples >= samplesPerChunk*2 {
-		c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange)
+	if t >= s.nextAt || numSamples >= o.samplesPerChunk*2 {
+		c = s.cutNewHeadChunk(t, e, o.chunkDiskMapper, o.chunkRange)
 		chunkCreated = true
 	}
vendor/github.com/prometheus/prometheus/tsdb/head_read.go (generated, vendored; 7 changed lines)

@@ -16,7 +16,6 @@ package tsdb
 import (
 	"context"
 	"math"
-	"sort"
 	"sync"
 
 	"github.com/go-kit/log/level"
@@ -137,8 +136,8 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings {
 		return index.ErrPostings(errors.Wrap(err, "expand postings"))
 	}
 
-	sort.Slice(series, func(i, j int) bool {
-		return labels.Compare(series[i].lset, series[j].lset) < 0
+	slices.SortFunc(series, func(a, b *memSeries) bool {
+		return labels.Compare(a.lset, b.lset) < 0
 	})
 
 	// Convert back to list.
@@ -450,7 +449,7 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper
 
 	// Next we want to sort all the collected chunks by min time so we can find
 	// those that overlap and stop when we know the rest don't.
-	sort.Sort(byMinTimeAndMinRef(tmpChks))
+	slices.SortFunc(tmpChks, refLessByMinTimeAndMinRef)
 
 	mc := &mergedOOOChunks{}
 	absoluteMax := int64(math.MinInt64)
vendor/github.com/prometheus/prometheus/tsdb/head_wal.go (generated, vendored; 66 changed lines)

@@ -564,7 +564,11 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
 
 	minValidTime := h.minValidTime.Load()
 	mint, maxt := int64(math.MaxInt64), int64(math.MinInt64)
-	chunkRange := h.chunkRange.Load()
+	appendChunkOpts := chunkOpts{
+		chunkDiskMapper: h.chunkDiskMapper,
+		chunkRange:      h.chunkRange.Load(),
+		samplesPerChunk: h.opts.SamplesPerChunk,
+	}
 
 	for in := range wp.input {
 		if in.existingSeries != nil {
@@ -588,7 +592,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
 			if s.T <= ms.mmMaxTime {
 				continue
 			}
-			if _, chunkCreated := ms.append(s.T, s.V, 0, h.chunkDiskMapper, chunkRange, h.opts.SamplesPerChunk); chunkCreated {
+			if _, chunkCreated := ms.append(s.T, s.V, 0, appendChunkOpts); chunkCreated {
 				h.metrics.chunksCreated.Inc()
 				h.metrics.chunks.Inc()
 			}
@@ -618,9 +622,9 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
 			}
 			var chunkCreated bool
 			if s.h != nil {
-				_, chunkCreated = ms.appendHistogram(s.t, s.h, 0, h.chunkDiskMapper, chunkRange, h.opts.SamplesPerChunk)
+				_, chunkCreated = ms.appendHistogram(s.t, s.h, 0, appendChunkOpts)
 			} else {
-				_, chunkCreated = ms.appendFloatHistogram(s.t, s.fh, 0, h.chunkDiskMapper, chunkRange, h.opts.SamplesPerChunk)
+				_, chunkCreated = ms.appendFloatHistogram(s.t, s.fh, 0, appendChunkOpts)
 			}
 			if chunkCreated {
 				h.metrics.chunksCreated.Inc()
@@ -939,10 +943,12 @@ const (
 )
 
 type chunkSnapshotRecord struct {
 	ref       chunks.HeadSeriesRef
 	lset      labels.Labels
 	mc        *memChunk
 	lastValue float64
+	lastHistogramValue      *histogram.Histogram
+	lastFloatHistogramValue *histogram.FloatHistogram
 }
 
 func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte {
@@ -957,18 +963,27 @@ func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte {
 	if s.headChunk == nil {
 		buf.PutUvarint(0)
 	} else {
+		enc := s.headChunk.chunk.Encoding()
 		buf.PutUvarint(1)
 		buf.PutBE64int64(s.headChunk.minTime)
 		buf.PutBE64int64(s.headChunk.maxTime)
-		buf.PutByte(byte(s.headChunk.chunk.Encoding()))
+		buf.PutByte(byte(enc))
 		buf.PutUvarintBytes(s.headChunk.chunk.Bytes())
-		// Backwards compatibility for old sampleBuf which had last 4 samples.
-		for i := 0; i < 3; i++ {
+
+		switch enc {
+		case chunkenc.EncXOR:
+			// Backwards compatibility for old sampleBuf which had last 4 samples.
+			for i := 0; i < 3; i++ {
+				buf.PutBE64int64(0)
+				buf.PutBEFloat64(0)
+			}
 			buf.PutBE64int64(0)
-			buf.PutBEFloat64(0)
+			buf.PutBEFloat64(s.lastValue)
+		case chunkenc.EncHistogram:
+			record.EncodeHistogram(&buf, s.lastHistogramValue)
+		default: // chunkenc.FloatHistogram.
+			record.EncodeFloatHistogram(&buf, s.lastFloatHistogramValue)
 		}
-		buf.PutBE64int64(0)
-		buf.PutBEFloat64(s.lastValue)
 	}
 	s.Unlock()
@@ -1008,13 +1023,22 @@ func decodeSeriesFromChunkSnapshot(d *record.Decoder, b []byte) (csr chunkSnapsh
 	}
 	csr.mc.chunk = chk
 
-	// Backwards-compatibility for old sampleBuf which had last 4 samples.
-	for i := 0; i < 3; i++ {
+	switch enc {
+	case chunkenc.EncXOR:
+		// Backwards-compatibility for old sampleBuf which had last 4 samples.
+		for i := 0; i < 3; i++ {
+			_ = dec.Be64int64()
+			_ = dec.Be64Float64()
+		}
 		_ = dec.Be64int64()
-		_ = dec.Be64Float64()
+		csr.lastValue = dec.Be64Float64()
+	case chunkenc.EncHistogram:
+		csr.lastHistogramValue = &histogram.Histogram{}
+		record.DecodeHistogram(&dec, csr.lastHistogramValue)
+	default: // chunkenc.FloatHistogram.
+		csr.lastFloatHistogramValue = &histogram.FloatHistogram{}
+		record.DecodeFloatHistogram(&dec, csr.lastFloatHistogramValue)
 	}
-	_ = dec.Be64int64()
-	csr.lastValue = dec.Be64Float64()
 
 	err = dec.Err()
 	if err != nil && len(dec.B) > 0 {
@@ -1095,7 +1119,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) {
 	if err := os.MkdirAll(cpdirtmp, 0o777); err != nil {
 		return stats, errors.Wrap(err, "create chunk snapshot dir")
 	}
-	cp, err := wlog.New(nil, nil, cpdirtmp, h.wal.CompressionEnabled())
+	cp, err := wlog.New(nil, nil, cpdirtmp, h.wal.CompressionType())
 	if err != nil {
 		return stats, errors.Wrap(err, "open chunk snapshot")
 	}
@@ -1392,6 +1416,8 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie
 			series.nextAt = csr.mc.maxTime // This will create a new chunk on append.
 			series.headChunk = csr.mc
 			series.lastValue = csr.lastValue
+			series.lastHistogramValue = csr.lastHistogramValue
+			series.lastFloatHistogramValue = csr.lastFloatHistogramValue
 
 			app, err := series.headChunk.chunk.Appender()
 			if err != nil {
vendor/github.com/prometheus/prometheus/tsdb/index/index.go (generated, vendored; 22 changed lines)

@@ -175,18 +175,15 @@ func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) {
 		return nil, errors.Wrap(encoding.ErrInvalidChecksum, "read TOC")
 	}
 
-	if err := d.Err(); err != nil {
-		return nil, err
-	}
-
-	return &TOC{
+	toc := &TOC{
 		Symbols:           d.Be64(),
 		Series:            d.Be64(),
 		LabelIndices:      d.Be64(),
 		LabelIndicesTable: d.Be64(),
 		Postings:          d.Be64(),
 		PostingsTable:     d.Be64(),
-	}, nil
+	}
+	return toc, d.Err()
 }
 
 // NewWriter returns a new Writer to the given filename. It serializes data in format version 2.
@@ -864,7 +861,10 @@ func (w *Writer) writePostingsToTmpFiles() error {
 	// using more memory than a single label name can.
 	for len(names) > 0 {
 		if w.labelNames[names[0]]+c > maxPostings {
-			break
+			if c > 0 {
+				break
+			}
+			return fmt.Errorf("corruption detected when writing postings to index: label %q has %d uses, but maxPostings is %d", names[0], w.labelNames[names[0]], maxPostings)
 		}
 		batchNames = append(batchNames, names[0])
 		c += w.labelNames[names[0]]
@@ -921,7 +921,7 @@ func (w *Writer) writePostingsToTmpFiles() error {
 			values = append(values, v)
 		}
 		// Symbol numbers are in order, so the strings will also be in order.
-		sort.Sort(uint32slice(values))
+		slices.Sort(values)
 		for _, v := range values {
 			value, err := w.symbols.Lookup(v)
 			if err != nil {
@@ -1014,12 +1014,6 @@ func (w *Writer) writePostings() error {
 	return nil
 }
 
-type uint32slice []uint32
-
-func (s uint32slice) Len() int           { return len(s) }
-func (s uint32slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-func (s uint32slice) Less(i, j int) bool { return s[i] < s[j] }
-
 type labelIndexHashEntry struct {
 	keys   []string
 	offset uint64
vendor/github.com/prometheus/prometheus/tsdb/index/postings.go (generated, vendored; 8 changed lines)

@@ -107,11 +107,11 @@ func (p *MemPostings) SortedKeys() []labels.Label {
 	}
 	p.mtx.RUnlock()
 
-	sort.Slice(keys, func(i, j int) bool {
-		if keys[i].Name != keys[j].Name {
-			return keys[i].Name < keys[j].Name
+	slices.SortFunc(keys, func(a, b labels.Label) bool {
+		if a.Name != b.Name {
+			return a.Name < b.Name
 		}
-		return keys[i].Value < keys[j].Value
+		return a.Value < b.Value
 	})
 	return keys
 }
vendor/github.com/prometheus/prometheus/tsdb/index/postingsstats.go (generated, vendored; 7 changed lines)

@@ -15,7 +15,8 @@ package index
 
 import (
 	"math"
-	"sort"
+
+	"golang.org/x/exp/slices"
 )
 
 // Stat holds values for a single cardinality statistic.
@@ -62,8 +63,8 @@ func (m *maxHeap) push(item Stat) {
 }
 
 func (m *maxHeap) get() []Stat {
-	sort.Slice(m.Items, func(i, j int) bool {
-		return m.Items[i].Count > m.Items[j].Count
+	slices.SortFunc(m.Items, func(a, b Stat) bool {
+		return a.Count > b.Count
 	})
 	return m.Items
 }
vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go (generated, vendored; 33 changed lines)

@@ -17,7 +17,8 @@ package tsdb
 import (
 	"errors"
 	"math"
-	"sort"
+
+	"golang.org/x/exp/slices"
 
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"
@@ -130,13 +131,13 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
 
 	// Next we want to sort all the collected chunks by min time so we can find
 	// those that overlap.
-	sort.Sort(metaByMinTimeAndMinRef(tmpChks))
+	slices.SortFunc(tmpChks, lessByMinTimeAndMinRef)
 
 	// Next we want to iterate the sorted collected chunks and only return the
 	// chunks Meta the first chunk that overlaps with others.
 	// Example chunks of a series: 5:(100, 200) 6:(500, 600) 7:(150, 250) 8:(550, 650)
 	// In the example 5 overlaps with 7 and 6 overlaps with 8 so we only want to
-	// to return chunk Metas for chunk 5 and chunk 6e
+	// return chunk Metas for chunk 5 and chunk 6e
 	*chks = append(*chks, tmpChks[0])
 	maxTime := tmpChks[0].MaxTime // Tracks the maxTime of the previous "to be merged chunk".
 	for _, c := range tmpChks[1:] {
@@ -175,30 +176,20 @@ type chunkMetaAndChunkDiskMapperRef struct {
 	origMaxT int64
 }
 
-type byMinTimeAndMinRef []chunkMetaAndChunkDiskMapperRef
-
-func (b byMinTimeAndMinRef) Len() int { return len(b) }
-func (b byMinTimeAndMinRef) Less(i, j int) bool {
-	if b[i].meta.MinTime == b[j].meta.MinTime {
-		return b[i].meta.Ref < b[j].meta.Ref
+func refLessByMinTimeAndMinRef(a, b chunkMetaAndChunkDiskMapperRef) bool {
+	if a.meta.MinTime == b.meta.MinTime {
+		return a.meta.Ref < b.meta.Ref
 	}
-	return b[i].meta.MinTime < b[j].meta.MinTime
+	return a.meta.MinTime < b.meta.MinTime
 }
 
-func (b byMinTimeAndMinRef) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
-
-type metaByMinTimeAndMinRef []chunks.Meta
-
-func (b metaByMinTimeAndMinRef) Len() int { return len(b) }
-func (b metaByMinTimeAndMinRef) Less(i, j int) bool {
-	if b[i].MinTime == b[j].MinTime {
-		return b[i].Ref < b[j].Ref
+func lessByMinTimeAndMinRef(a, b chunks.Meta) bool {
+	if a.MinTime == b.MinTime {
+		return a.Ref < b.Ref
 	}
-	return b[i].MinTime < b[j].MinTime
+	return a.MinTime < b.MinTime
 }
 
-func (b metaByMinTimeAndMinRef) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
-
 func (oh *OOOHeadIndexReader) Postings(name string, values ...string) (index.Postings, error) {
 	switch len(values) {
 	case 0:
vendor/github.com/prometheus/prometheus/tsdb/querier.go (generated, vendored, 20 changes)
@@ -414,6 +414,26 @@ func labelValuesWithMatchers(r IndexReader, name string, matchers ...*labels.Mat
 	if err != nil {
 		return nil, errors.Wrapf(err, "fetching values of label %s", name)
 	}

+	// If we have a matcher for the label name, we can filter out values that don't match
+	// before we fetch postings. This is especially useful for labels with many values.
+	// e.g. __name__ with a selector like {__name__="xyz"}
+	for _, m := range matchers {
+		if m.Name != name {
+			continue
+		}
+
+		// re-use the allValues slice to avoid allocations
+		// this is safe because the iteration is always ahead of the append
+		filteredValues := allValues[:0]
+		for _, v := range allValues {
+			if m.Matches(v) {
+				filteredValues = append(filteredValues, v)
+			}
+		}
+		allValues = filteredValues
+	}
+
 	valuesPostings := make([]index.Postings, len(allValues))
 	for i, value := range allValues {
 		valuesPostings[i], err = r.Postings(name, value)
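
The new filtering loop above reuses the allValues backing array via the s[:0] re-slice idiom, which the upstream comment justifies because the read index always stays ahead of the write index. A small self-contained sketch of that idiom (the prefix filter is only an example):

package main

import (
	"fmt"
	"strings"
)

func main() {
	values := []string{"xyz", "abc", "xy", "xz"}
	// filtered shares storage with values; appends overwrite elements that
	// have already been read, so no extra allocation is needed.
	filtered := values[:0]
	for _, v := range values {
		if strings.HasPrefix(v, "x") {
			filtered = append(filtered, v)
		}
	}
	values = filtered
	fmt.Println(values) // [xyz xy xz]
}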
vendor/github.com/prometheus/prometheus/tsdb/record/record.go (generated, vendored, 310 changes)
@@ -441,49 +441,7 @@ func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample)
 			H: &histogram.Histogram{},
 		}

-		rh.H.CounterResetHint = histogram.CounterResetHint(dec.Byte())
-
-		rh.H.Schema = int32(dec.Varint64())
-		rh.H.ZeroThreshold = math.Float64frombits(dec.Be64())
-
-		rh.H.ZeroCount = dec.Uvarint64()
-		rh.H.Count = dec.Uvarint64()
-		rh.H.Sum = math.Float64frombits(dec.Be64())
-
-		l := dec.Uvarint()
-		if l > 0 {
-			rh.H.PositiveSpans = make([]histogram.Span, l)
-		}
-		for i := range rh.H.PositiveSpans {
-			rh.H.PositiveSpans[i].Offset = int32(dec.Varint64())
-			rh.H.PositiveSpans[i].Length = dec.Uvarint32()
-		}
-
-		l = dec.Uvarint()
-		if l > 0 {
-			rh.H.NegativeSpans = make([]histogram.Span, l)
-		}
-		for i := range rh.H.NegativeSpans {
-			rh.H.NegativeSpans[i].Offset = int32(dec.Varint64())
-			rh.H.NegativeSpans[i].Length = dec.Uvarint32()
-		}
-
-		l = dec.Uvarint()
-		if l > 0 {
-			rh.H.PositiveBuckets = make([]int64, l)
-		}
-		for i := range rh.H.PositiveBuckets {
-			rh.H.PositiveBuckets[i] = dec.Varint64()
-		}
-
-		l = dec.Uvarint()
-		if l > 0 {
-			rh.H.NegativeBuckets = make([]int64, l)
-		}
-		for i := range rh.H.NegativeBuckets {
-			rh.H.NegativeBuckets[i] = dec.Varint64()
-		}
+		DecodeHistogram(&dec, rh.H)

 		histograms = append(histograms, rh)
 	}
@@ -496,6 +454,52 @@ func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample)
 	return histograms, nil
 }

+// DecodeHistogram decodes a Histogram from a byte slice.
+func DecodeHistogram(buf *encoding.Decbuf, h *histogram.Histogram) {
+	h.CounterResetHint = histogram.CounterResetHint(buf.Byte())
+
+	h.Schema = int32(buf.Varint64())
+	h.ZeroThreshold = math.Float64frombits(buf.Be64())
+
+	h.ZeroCount = buf.Uvarint64()
+	h.Count = buf.Uvarint64()
+	h.Sum = math.Float64frombits(buf.Be64())
+
+	l := buf.Uvarint()
+	if l > 0 {
+		h.PositiveSpans = make([]histogram.Span, l)
+	}
+	for i := range h.PositiveSpans {
+		h.PositiveSpans[i].Offset = int32(buf.Varint64())
+		h.PositiveSpans[i].Length = buf.Uvarint32()
+	}
+
+	l = buf.Uvarint()
+	if l > 0 {
+		h.NegativeSpans = make([]histogram.Span, l)
+	}
+	for i := range h.NegativeSpans {
+		h.NegativeSpans[i].Offset = int32(buf.Varint64())
+		h.NegativeSpans[i].Length = buf.Uvarint32()
+	}
+
+	l = buf.Uvarint()
+	if l > 0 {
+		h.PositiveBuckets = make([]int64, l)
+	}
+	for i := range h.PositiveBuckets {
+		h.PositiveBuckets[i] = buf.Varint64()
+	}
+
+	l = buf.Uvarint()
+	if l > 0 {
+		h.NegativeBuckets = make([]int64, l)
+	}
+	for i := range h.NegativeBuckets {
+		h.NegativeBuckets[i] = buf.Varint64()
+	}
+}
+
 func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogramSample) ([]RefFloatHistogramSample, error) {
 	dec := encoding.Decbuf{B: rec}
 	t := Type(dec.Byte())
@@ -519,49 +523,7 @@ func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogr
 			FH: &histogram.FloatHistogram{},
 		}

-		rh.FH.CounterResetHint = histogram.CounterResetHint(dec.Byte())
-
-		rh.FH.Schema = int32(dec.Varint64())
-		rh.FH.ZeroThreshold = dec.Be64Float64()
-
-		rh.FH.ZeroCount = dec.Be64Float64()
-		rh.FH.Count = dec.Be64Float64()
-		rh.FH.Sum = dec.Be64Float64()
-
-		l := dec.Uvarint()
-		if l > 0 {
-			rh.FH.PositiveSpans = make([]histogram.Span, l)
-		}
-		for i := range rh.FH.PositiveSpans {
-			rh.FH.PositiveSpans[i].Offset = int32(dec.Varint64())
-			rh.FH.PositiveSpans[i].Length = dec.Uvarint32()
-		}
-
-		l = dec.Uvarint()
-		if l > 0 {
-			rh.FH.NegativeSpans = make([]histogram.Span, l)
-		}
-		for i := range rh.FH.NegativeSpans {
-			rh.FH.NegativeSpans[i].Offset = int32(dec.Varint64())
-			rh.FH.NegativeSpans[i].Length = dec.Uvarint32()
-		}
-
-		l = dec.Uvarint()
-		if l > 0 {
-			rh.FH.PositiveBuckets = make([]float64, l)
-		}
-		for i := range rh.FH.PositiveBuckets {
-			rh.FH.PositiveBuckets[i] = dec.Be64Float64()
-		}
-
-		l = dec.Uvarint()
-		if l > 0 {
-			rh.FH.NegativeBuckets = make([]float64, l)
-		}
-		for i := range rh.FH.NegativeBuckets {
-			rh.FH.NegativeBuckets[i] = dec.Be64Float64()
-		}
+		DecodeFloatHistogram(&dec, rh.FH)

 		histograms = append(histograms, rh)
 	}
@@ -574,6 +536,52 @@ func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogr
 	return histograms, nil
 }

+// Decode decodes a Histogram from a byte slice.
+func DecodeFloatHistogram(buf *encoding.Decbuf, fh *histogram.FloatHistogram) {
+	fh.CounterResetHint = histogram.CounterResetHint(buf.Byte())
+
+	fh.Schema = int32(buf.Varint64())
+	fh.ZeroThreshold = buf.Be64Float64()
+
+	fh.ZeroCount = buf.Be64Float64()
+	fh.Count = buf.Be64Float64()
+	fh.Sum = buf.Be64Float64()
+
+	l := buf.Uvarint()
+	if l > 0 {
+		fh.PositiveSpans = make([]histogram.Span, l)
+	}
+	for i := range fh.PositiveSpans {
+		fh.PositiveSpans[i].Offset = int32(buf.Varint64())
+		fh.PositiveSpans[i].Length = buf.Uvarint32()
+	}
+
+	l = buf.Uvarint()
+	if l > 0 {
+		fh.NegativeSpans = make([]histogram.Span, l)
+	}
+	for i := range fh.NegativeSpans {
+		fh.NegativeSpans[i].Offset = int32(buf.Varint64())
+		fh.NegativeSpans[i].Length = buf.Uvarint32()
+	}
+
+	l = buf.Uvarint()
+	if l > 0 {
+		fh.PositiveBuckets = make([]float64, l)
+	}
+	for i := range fh.PositiveBuckets {
+		fh.PositiveBuckets[i] = buf.Be64Float64()
+	}
+
+	l = buf.Uvarint()
+	if l > 0 {
+		fh.NegativeBuckets = make([]float64, l)
+	}
+	for i := range fh.NegativeBuckets {
+		fh.NegativeBuckets[i] = buf.Be64Float64()
+	}
+}
+
 // Encoder encodes series, sample, and tombstones records.
 // The zero value is ready to use.
 type Encoder struct{}
@@ -719,41 +727,46 @@ func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) []
 		buf.PutVarint64(int64(h.Ref) - int64(first.Ref))
 		buf.PutVarint64(h.T - first.T)

-		buf.PutByte(byte(h.H.CounterResetHint))
-
-		buf.PutVarint64(int64(h.H.Schema))
-		buf.PutBE64(math.Float64bits(h.H.ZeroThreshold))
-
-		buf.PutUvarint64(h.H.ZeroCount)
-		buf.PutUvarint64(h.H.Count)
-		buf.PutBE64(math.Float64bits(h.H.Sum))
-
-		buf.PutUvarint(len(h.H.PositiveSpans))
-		for _, s := range h.H.PositiveSpans {
-			buf.PutVarint64(int64(s.Offset))
-			buf.PutUvarint32(s.Length)
-		}
-
-		buf.PutUvarint(len(h.H.NegativeSpans))
-		for _, s := range h.H.NegativeSpans {
-			buf.PutVarint64(int64(s.Offset))
-			buf.PutUvarint32(s.Length)
-		}
-
-		buf.PutUvarint(len(h.H.PositiveBuckets))
-		for _, b := range h.H.PositiveBuckets {
-			buf.PutVarint64(b)
-		}
-
-		buf.PutUvarint(len(h.H.NegativeBuckets))
-		for _, b := range h.H.NegativeBuckets {
-			buf.PutVarint64(b)
-		}
+		EncodeHistogram(&buf, h.H)
 	}

 	return buf.Get()
 }

+// EncodeHistogram encodes a Histogram into a byte slice.
+func EncodeHistogram(buf *encoding.Encbuf, h *histogram.Histogram) {
+	buf.PutByte(byte(h.CounterResetHint))
+
+	buf.PutVarint64(int64(h.Schema))
+	buf.PutBE64(math.Float64bits(h.ZeroThreshold))
+
+	buf.PutUvarint64(h.ZeroCount)
+	buf.PutUvarint64(h.Count)
+	buf.PutBE64(math.Float64bits(h.Sum))
+
+	buf.PutUvarint(len(h.PositiveSpans))
+	for _, s := range h.PositiveSpans {
+		buf.PutVarint64(int64(s.Offset))
+		buf.PutUvarint32(s.Length)
+	}
+
+	buf.PutUvarint(len(h.NegativeSpans))
+	for _, s := range h.NegativeSpans {
+		buf.PutVarint64(int64(s.Offset))
+		buf.PutUvarint32(s.Length)
+	}
+
+	buf.PutUvarint(len(h.PositiveBuckets))
+	for _, b := range h.PositiveBuckets {
+		buf.PutVarint64(b)
+	}
+
+	buf.PutUvarint(len(h.NegativeBuckets))
+	for _, b := range h.NegativeBuckets {
+		buf.PutVarint64(b)
+	}
+}
+
 func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte {
 	buf := encoding.Encbuf{B: b}
 	buf.PutByte(byte(FloatHistogramSamples))
@@ -772,37 +785,42 @@ func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b
 		buf.PutVarint64(int64(h.Ref) - int64(first.Ref))
 		buf.PutVarint64(h.T - first.T)

-		buf.PutByte(byte(h.FH.CounterResetHint))
-
-		buf.PutVarint64(int64(h.FH.Schema))
-		buf.PutBEFloat64(h.FH.ZeroThreshold)
-
-		buf.PutBEFloat64(h.FH.ZeroCount)
-		buf.PutBEFloat64(h.FH.Count)
-		buf.PutBEFloat64(h.FH.Sum)
-
-		buf.PutUvarint(len(h.FH.PositiveSpans))
-		for _, s := range h.FH.PositiveSpans {
-			buf.PutVarint64(int64(s.Offset))
-			buf.PutUvarint32(s.Length)
-		}
-
-		buf.PutUvarint(len(h.FH.NegativeSpans))
-		for _, s := range h.FH.NegativeSpans {
-			buf.PutVarint64(int64(s.Offset))
-			buf.PutUvarint32(s.Length)
-		}
-
-		buf.PutUvarint(len(h.FH.PositiveBuckets))
-		for _, b := range h.FH.PositiveBuckets {
-			buf.PutBEFloat64(b)
-		}
-
-		buf.PutUvarint(len(h.FH.NegativeBuckets))
-		for _, b := range h.FH.NegativeBuckets {
-			buf.PutBEFloat64(b)
-		}
+		EncodeFloatHistogram(&buf, h.FH)
 	}

 	return buf.Get()
 }

+// Encode encodes the Float Histogram into a byte slice.
+func EncodeFloatHistogram(buf *encoding.Encbuf, h *histogram.FloatHistogram) {
+	buf.PutByte(byte(h.CounterResetHint))
+
+	buf.PutVarint64(int64(h.Schema))
+	buf.PutBEFloat64(h.ZeroThreshold)
+
+	buf.PutBEFloat64(h.ZeroCount)
+	buf.PutBEFloat64(h.Count)
+	buf.PutBEFloat64(h.Sum)
+
+	buf.PutUvarint(len(h.PositiveSpans))
+	for _, s := range h.PositiveSpans {
+		buf.PutVarint64(int64(s.Offset))
+		buf.PutUvarint32(s.Length)
+	}
+
+	buf.PutUvarint(len(h.NegativeSpans))
+	for _, s := range h.NegativeSpans {
+		buf.PutVarint64(int64(s.Offset))
+		buf.PutUvarint32(s.Length)
+	}
+
+	buf.PutUvarint(len(h.PositiveBuckets))
+	for _, b := range h.PositiveBuckets {
+		buf.PutBEFloat64(b)
+	}
+
+	buf.PutUvarint(len(h.NegativeBuckets))
+	for _, b := range h.NegativeBuckets {
+		buf.PutBEFloat64(b)
+	}
+}
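
The extracted DecodeHistogram/EncodeHistogram helpers all follow the same wire layout: fixed-width fields first, then each span and bucket list as a uvarint count followed by varint values. A rough standalone sketch of that length-prefixed layout using only encoding/binary; the real code uses the internal tsdb/encoding Decbuf/Encbuf types, so this approximates the format rather than reproducing the implementation:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// encodeBuckets writes a uvarint count followed by that many varint deltas,
// the same shape as one bucket list in the histogram record.
func encodeBuckets(w *bytes.Buffer, buckets []int64) {
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], uint64(len(buckets)))
	w.Write(tmp[:n])
	for _, b := range buckets {
		n = binary.PutVarint(tmp[:], b)
		w.Write(tmp[:n])
	}
}

// decodeBuckets reads the count, allocates once, then reads each delta back.
func decodeBuckets(r *bytes.Reader) ([]int64, error) {
	l, err := binary.ReadUvarint(r)
	if err != nil {
		return nil, err
	}
	buckets := make([]int64, l)
	for i := range buckets {
		if buckets[i], err = binary.ReadVarint(r); err != nil {
			return nil, err
		}
	}
	return buckets, nil
}

func main() {
	var buf bytes.Buffer
	encodeBuckets(&buf, []int64{3, -1, 0, 2})
	got, err := decodeBuckets(bytes.NewReader(buf.Bytes()))
	fmt.Println(got, err) // [3 -1 0 2] <nil>
}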
|
2
vendor/github.com/prometheus/prometheus/tsdb/wal.go
generated
vendored
2
vendor/github.com/prometheus/prometheus/tsdb/wal.go
generated
vendored
|
@ -1226,7 +1226,7 @@ func MigrateWAL(logger log.Logger, dir string) (err error) {
|
||||||
if err := os.RemoveAll(tmpdir); err != nil {
|
if err := os.RemoveAll(tmpdir); err != nil {
|
||||||
return errors.Wrap(err, "cleanup replacement dir")
|
return errors.Wrap(err, "cleanup replacement dir")
|
||||||
}
|
}
|
||||||
repl, err := wlog.New(logger, nil, tmpdir, false)
|
repl, err := wlog.New(logger, nil, tmpdir, wlog.CompressionNone)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "open new WAL")
|
return errors.Wrap(err, "open new WAL")
|
||||||
}
|
}
|
||||||
|
|
vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go (generated, vendored, 8 changes)
@@ -20,13 +20,13 @@ import (
 	"math"
 	"os"
 	"path/filepath"
-	"sort"
 	"strconv"
 	"strings"

 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/pkg/errors"
+	"golang.org/x/exp/slices"

 	"github.com/prometheus/prometheus/tsdb/chunks"
 	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
@@ -134,7 +134,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head
 	if err := os.MkdirAll(cpdirtmp, 0o777); err != nil {
 		return nil, errors.Wrap(err, "create checkpoint dir")
 	}
-	cp, err := New(nil, nil, cpdirtmp, w.CompressionEnabled())
+	cp, err := New(nil, nil, cpdirtmp, w.CompressionType())
 	if err != nil {
 		return nil, errors.Wrap(err, "open checkpoint")
 	}
@@ -374,8 +374,8 @@ func listCheckpoints(dir string) (refs []checkpointRef, err error) {
 		refs = append(refs, checkpointRef{name: fi.Name(), index: idx})
 	}

-	sort.Slice(refs, func(i, j int) bool {
-		return refs[i].index < refs[j].index
+	slices.SortFunc(refs, func(a, b checkpointRef) bool {
+		return a.index < b.index
 	})

 	return refs, nil
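
Checkpoint now propagates the WAL's CompressionType instead of a boolean. The new ParseCompressionType (added in wlog.go below) maps the old flag pair onto that type; a tiny standalone restatement of its decision table, copied from the diff for illustration:

package main

import "fmt"

type CompressionType string

const (
	CompressionNone   CompressionType = "none"
	CompressionSnappy CompressionType = "snappy"
	CompressionZstd   CompressionType = "zstd"
)

// Same decision table as wlog.ParseCompressionType: the boolean gates
// compression entirely, and any unrecognized type falls back to snappy.
func parseCompressionType(compress bool, compressType string) CompressionType {
	if compress {
		if compressType == "zstd" {
			return CompressionZstd
		}
		return CompressionSnappy
	}
	return CompressionNone
}

func main() {
	fmt.Println(parseCompressionType(false, "zstd"))  // none
	fmt.Println(parseCompressionType(true, "zstd"))   // zstd
	fmt.Println(parseCompressionType(true, "snappy")) // snappy
	fmt.Println(parseCompressionType(true, "bogus"))  // snappy (default)
}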
vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go (generated, vendored, 53 changes)
@@ -23,6 +23,7 @@ import (
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/golang/snappy"
+	"github.com/klauspost/compress/zstd"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 )
@@ -51,10 +52,14 @@ func NewLiveReaderMetrics(reg prometheus.Registerer) *LiveReaderMetrics {

 // NewLiveReader returns a new live reader.
 func NewLiveReader(logger log.Logger, metrics *LiveReaderMetrics, r io.Reader) *LiveReader {
+	// Calling zstd.NewReader with a nil io.Reader and no options cannot return an error.
+	zstdReader, _ := zstd.NewReader(nil)
+
 	lr := &LiveReader{
 		logger:     logger,
 		rdr:        r,
-		metrics:    metrics,
+		zstdReader: zstdReader,
+		metrics:    metrics,

 		// Until we understand how they come about, make readers permissive
 		// to records spanning pages.
@@ -68,17 +73,18 @@ func NewLiveReader(logger log.Logger, metrics *LiveReaderMetrics, r io.Reader) *
 // that are still in the process of being written, and returns records as soon
 // as they can be read.
 type LiveReader struct {
 	logger      log.Logger
 	rdr         io.Reader
 	err         error
 	rec         []byte
-	snappyBuf   []byte
-	hdr         [recordHeaderSize]byte
-	buf         [pageSize]byte
-	readIndex   int   // Index in buf to start at for next read.
-	writeIndex  int   // Index in buf to start at for next write.
-	total       int64 // Total bytes processed during reading in calls to Next().
-	index       int   // Used to track partial records, should be 0 at the start of every new record.
+	compressBuf []byte
+	zstdReader  *zstd.Decoder
+	hdr         [recordHeaderSize]byte
+	buf         [pageSize]byte
+	readIndex   int   // Index in buf to start at for next read.
+	writeIndex  int   // Index in buf to start at for next write.
+	total       int64 // Total bytes processed during reading in calls to Next().
+	index       int   // Used to track partial records, should be 0 at the start of every new record.

 	// For testing, we can treat EOF as a non-error.
 	eofNonErr bool
@@ -191,12 +197,14 @@ func (r *LiveReader) buildRecord() (bool, error) {
 	rt := recTypeFromHeader(r.hdr[0])
 	if rt == recFirst || rt == recFull {
 		r.rec = r.rec[:0]
-		r.snappyBuf = r.snappyBuf[:0]
+		r.compressBuf = r.compressBuf[:0]
 	}

-	compressed := r.hdr[0]&snappyMask != 0
-	if compressed {
-		r.snappyBuf = append(r.snappyBuf, temp...)
+	isSnappyCompressed := r.hdr[0]&snappyMask == snappyMask
+	isZstdCompressed := r.hdr[0]&zstdMask == zstdMask
+
+	if isSnappyCompressed || isZstdCompressed {
+		r.compressBuf = append(r.compressBuf, temp...)
 	} else {
 		r.rec = append(r.rec, temp...)
 	}
@@ -207,12 +215,17 @@ func (r *LiveReader) buildRecord() (bool, error) {
 	}
 	if rt == recLast || rt == recFull {
 		r.index = 0
-		if compressed && len(r.snappyBuf) > 0 {
+		if isSnappyCompressed && len(r.compressBuf) > 0 {
 			// The snappy library uses `len` to calculate if we need a new buffer.
 			// In order to allocate as few buffers as possible make the length
 			// equal to the capacity.
 			r.rec = r.rec[:cap(r.rec)]
-			r.rec, err = snappy.Decode(r.rec, r.snappyBuf)
+			r.rec, err = snappy.Decode(r.rec, r.compressBuf)
+			if err != nil {
+				return false, err
+			}
+		} else if isZstdCompressed && len(r.compressBuf) > 0 {
+			r.rec, err = r.zstdReader.DecodeAll(r.compressBuf, r.rec[:0])
 			if err != nil {
 				return false, err
 			}
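
Both WAL readers decompress zstd records through a single long-lived *zstd.Decoder used statelessly via DecodeAll, mirroring how the writer side holds one *zstd.Encoder for EncodeAll. A minimal round-trip with github.com/klauspost/compress/zstd showing that stateless usage:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// One encoder and one decoder are created up front and then reused
	// statelessly via EncodeAll/DecodeAll, as the vendored WAL code does.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	// With a nil reader and no options this cannot fail, which is why the
	// vendored code discards the error.
	dec, _ := zstd.NewReader(nil)

	rec := bytes.Repeat([]byte("some WAL record payload "), 16)
	compressed := enc.EncodeAll(rec, nil)
	restored, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(compressed) < len(rec), bytes.Equal(restored, rec)) // true true
}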
vendor/github.com/prometheus/prometheus/tsdb/wlog/reader.go (generated, vendored, 36 changes)
@@ -20,23 +20,27 @@ import (
 	"io"

 	"github.com/golang/snappy"
+	"github.com/klauspost/compress/zstd"
 	"github.com/pkg/errors"
 )

 // Reader reads WAL records from an io.Reader.
 type Reader struct {
 	rdr         io.Reader
 	err         error
 	rec         []byte
-	snappyBuf   []byte
-	buf         [pageSize]byte
-	total       int64   // Total bytes processed.
-	curRecTyp   recType // Used for checking that the last record is not torn.
+	compressBuf []byte
+	zstdReader  *zstd.Decoder
+	buf         [pageSize]byte
+	total       int64   // Total bytes processed.
+	curRecTyp   recType // Used for checking that the last record is not torn.
 }

 // NewReader returns a new reader.
 func NewReader(r io.Reader) *Reader {
-	return &Reader{rdr: r}
+	// Calling zstd.NewReader with a nil io.Reader and no options cannot return an error.
+	zstdReader, _ := zstd.NewReader(nil)
+	return &Reader{rdr: r, zstdReader: zstdReader}
 }

 // Next advances the reader to the next records and returns true if it exists.
@@ -63,7 +67,7 @@ func (r *Reader) next() (err error) {
 	buf := r.buf[recordHeaderSize:]

 	r.rec = r.rec[:0]
-	r.snappyBuf = r.snappyBuf[:0]
+	r.compressBuf = r.compressBuf[:0]

 	i := 0
 	for {
@@ -72,7 +76,8 @@ func (r *Reader) next() (err error) {
 		}
 		r.total++
 		r.curRecTyp = recTypeFromHeader(hdr[0])
-		compressed := hdr[0]&snappyMask != 0
+		isSnappyCompressed := hdr[0]&snappyMask == snappyMask
+		isZstdCompressed := hdr[0]&zstdMask == zstdMask

 		// Gobble up zero bytes.
 		if r.curRecTyp == recPageTerm {
@@ -128,8 +133,8 @@ func (r *Reader) next() (err error) {
 			return errors.Errorf("unexpected checksum %x, expected %x", c, crc)
 		}

-		if compressed {
-			r.snappyBuf = append(r.snappyBuf, buf[:length]...)
+		if isSnappyCompressed || isZstdCompressed {
+			r.compressBuf = append(r.compressBuf, buf[:length]...)
 		} else {
 			r.rec = append(r.rec, buf[:length]...)
 		}
@@ -138,12 +143,15 @@ func (r *Reader) next() (err error) {
 			return err
 		}
 		if r.curRecTyp == recLast || r.curRecTyp == recFull {
-			if compressed && len(r.snappyBuf) > 0 {
+			if isSnappyCompressed && len(r.compressBuf) > 0 {
 				// The snappy library uses `len` to calculate if we need a new buffer.
 				// In order to allocate as few buffers as possible make the length
 				// equal to the capacity.
 				r.rec = r.rec[:cap(r.rec)]
-				r.rec, err = snappy.Decode(r.rec, r.snappyBuf)
+				r.rec, err = snappy.Decode(r.rec, r.compressBuf)
+				return err
+			} else if isZstdCompressed && len(r.compressBuf) > 0 {
+				r.rec, err = r.zstdReader.DecodeAll(r.compressBuf, r.rec[:0])
 				return err
 			}
 			return nil
vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go (generated, vendored, 96 changes)
@@ -22,7 +22,6 @@ import (
 	"io"
 	"os"
 	"path/filepath"
-	"sort"
 	"strconv"
 	"sync"
 	"time"
@@ -30,8 +29,10 @@ import (
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/golang/snappy"
+	"github.com/klauspost/compress/zstd"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
+	"golang.org/x/exp/slices"

 	"github.com/prometheus/prometheus/tsdb/fileutil"
 )
@@ -164,6 +165,26 @@ func OpenReadSegment(fn string) (*Segment, error) {
 	return &Segment{SegmentFile: f, i: k, dir: filepath.Dir(fn)}, nil
 }

+type CompressionType string
+
+const (
+	CompressionNone   CompressionType = "none"
+	CompressionSnappy CompressionType = "snappy"
+	CompressionZstd   CompressionType = "zstd"
+)
+
+// ParseCompressionType parses the two compression-related configuration values and returns the CompressionType. If
+// compression is enabled but the compressType is unrecognized, we default to Snappy compression.
+func ParseCompressionType(compress bool, compressType string) CompressionType {
+	if compress {
+		if compressType == "zstd" {
+			return CompressionZstd
+		}
+		return CompressionSnappy
+	}
+	return CompressionNone
+}
+
 // WL is a write log that stores records in segment files.
 // It must be read from start to end once before logging new data.
 // If an error occurs during read, the repair procedure must be called
@@ -185,8 +206,9 @@ type WL struct {
 	stopc       chan chan struct{}
 	actorc      chan func()
 	closed      bool // To allow calling Close() more than once without blocking.
-	compress    bool
-	snappyBuf   []byte
+	compress    CompressionType
+	compressBuf []byte
+	zstdWriter  *zstd.Encoder

 	WriteNotified WriteNotified
@@ -265,13 +287,13 @@ func newWLMetrics(w *WL, r prometheus.Registerer) *wlMetrics {
 }

 // New returns a new WAL over the given directory.
-func New(logger log.Logger, reg prometheus.Registerer, dir string, compress bool) (*WL, error) {
+func New(logger log.Logger, reg prometheus.Registerer, dir string, compress CompressionType) (*WL, error) {
 	return NewSize(logger, reg, dir, DefaultSegmentSize, compress)
 }

 // NewSize returns a new write log over the given directory.
 // New segments are created with the specified size.
-func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress bool) (*WL, error) {
+func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress CompressionType) (*WL, error) {
 	if segmentSize%pageSize != 0 {
 		return nil, errors.New("invalid segment size")
 	}
@@ -281,6 +303,16 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
+
+	var zstdWriter *zstd.Encoder
+	if compress == CompressionZstd {
+		var err error
+		zstdWriter, err = zstd.NewWriter(nil)
+		if err != nil {
+			return nil, err
+		}
+	}
+
 	w := &WL{
 		dir:    dir,
 		logger: logger,
@@ -289,6 +321,7 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi
 		actorc:   make(chan func(), 100),
 		stopc:    make(chan chan struct{}),
 		compress: compress,
+		zstdWriter: zstdWriter,
 	}
 	prefix := "prometheus_tsdb_wal_"
 	if filepath.Base(dir) == WblDirName {
@@ -327,16 +360,22 @@ func Open(logger log.Logger, dir string) (*WL, error) {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
+	zstdWriter, err := zstd.NewWriter(nil)
+	if err != nil {
+		return nil, err
+	}
+
 	w := &WL{
-		dir:    dir,
-		logger: logger,
+		dir:        dir,
+		logger:     logger,
+		zstdWriter: zstdWriter,
 	}

 	return w, nil
 }

-// CompressionEnabled returns if compression is enabled on this WAL.
-func (w *WL) CompressionEnabled() bool {
+// CompressionType returns if compression is enabled on this WAL.
+func (w *WL) CompressionType() CompressionType {
 	return w.compress
 }
@@ -583,9 +622,10 @@ func (w *WL) flushPage(clear bool) error {
 }

 // First Byte of header format:
-// [ 4 bits unallocated] [1 bit snappy compression flag] [ 3 bit record type ]
+// [3 bits unallocated] [1 bit zstd compression flag] [1 bit snappy compression flag] [3 bit record type ]
 const (
 	snappyMask  = 1 << 3
+	zstdMask    = 1 << 4
 	recTypeMask = snappyMask - 1
 )
@@ -655,17 +695,23 @@ func (w *WL) log(rec []byte, final bool) error {

 	// Compress the record before calculating if a new segment is needed.
 	compressed := false
-	if w.compress &&
-		len(rec) > 0 &&
+	if w.compress == CompressionSnappy && len(rec) > 0 {
 		// If MaxEncodedLen is less than 0 the record is too large to be compressed.
-		snappy.MaxEncodedLen(len(rec)) >= 0 {
+		if len(rec) > 0 && snappy.MaxEncodedLen(len(rec)) >= 0 {
 			// The snappy library uses `len` to calculate if we need a new buffer.
 			// In order to allocate as few buffers as possible make the length
 			// equal to the capacity.
-		w.snappyBuf = w.snappyBuf[:cap(w.snappyBuf)]
-		w.snappyBuf = snappy.Encode(w.snappyBuf, rec)
-		if len(w.snappyBuf) < len(rec) {
-			rec = w.snappyBuf
+			w.compressBuf = w.compressBuf[:cap(w.compressBuf)]
+			w.compressBuf = snappy.Encode(w.compressBuf, rec)
+			if len(w.compressBuf) < len(rec) {
+				rec = w.compressBuf
+				compressed = true
+			}
+		}
+	} else if w.compress == CompressionZstd && len(rec) > 0 {
+		w.compressBuf = w.zstdWriter.EncodeAll(rec, w.compressBuf[:0])
+		if len(w.compressBuf) < len(rec) {
+			rec = w.compressBuf
 			compressed = true
 		}
 	}
@@ -706,7 +752,11 @@ func (w *WL) log(rec []byte, final bool) error {
 			typ = recMiddle
 		}
 		if compressed {
-			typ |= snappyMask
+			if w.compress == CompressionSnappy {
+				typ |= snappyMask
+			} else if w.compress == CompressionZstd {
+				typ |= zstdMask
+			}
 		}

 		buf[0] = byte(typ)
@@ -859,8 +909,8 @@ func listSegments(dir string) (refs []segmentRef, err error) {
 		}
 		refs = append(refs, segmentRef{name: fn, index: k})
 	}
-	sort.Slice(refs, func(i, j int) bool {
-		return refs[i].index < refs[j].index
+	slices.SortFunc(refs, func(a, b segmentRef) bool {
+		return a.index < b.index
 	})
 	for i := 0; i < len(refs)-1; i++ {
 		if refs[i].index+1 != refs[i+1].index {
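
The widened header byte now packs the record type into the low three bits with one flag bit each for snappy and zstd, as the updated comment describes. A tiny sketch of reading that layout back out of a header byte; the recFull value here is hypothetical, for illustration only:

package main

import "fmt"

// The same constants as above: low three bits carry the record type,
// bit 3 the snappy flag, bit 4 the zstd flag.
const (
	snappyMask  = 1 << 3
	zstdMask    = 1 << 4
	recTypeMask = snappyMask - 1 // 0b0111
)

func main() {
	const recFull = 4 // hypothetical record-type value, for illustration only
	hdr := byte(recFull | zstdMask)

	fmt.Println(hdr & recTypeMask)            // 4: the record type survives masking
	fmt.Println(hdr&snappyMask == snappyMask) // false
	fmt.Println(hdr&zstdMask == zstdMask)     // true
}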
vendor/golang.org/x/sync/semaphore/semaphore.go (generated, vendored, new file, 136 additions)
@@ -0,0 +1,136 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package semaphore provides a weighted semaphore implementation.
+package semaphore // import "golang.org/x/sync/semaphore"
+
+import (
+	"container/list"
+	"context"
+	"sync"
+)
+
+type waiter struct {
+	n     int64
+	ready chan<- struct{} // Closed when semaphore acquired.
+}
+
+// NewWeighted creates a new weighted semaphore with the given
+// maximum combined weight for concurrent access.
+func NewWeighted(n int64) *Weighted {
+	w := &Weighted{size: n}
+	return w
+}
+
+// Weighted provides a way to bound concurrent access to a resource.
+// The callers can request access with a given weight.
+type Weighted struct {
+	size    int64
+	cur     int64
+	mu      sync.Mutex
+	waiters list.List
+}
+
+// Acquire acquires the semaphore with a weight of n, blocking until resources
+// are available or ctx is done. On success, returns nil. On failure, returns
+// ctx.Err() and leaves the semaphore unchanged.
+//
+// If ctx is already done, Acquire may still succeed without blocking.
+func (s *Weighted) Acquire(ctx context.Context, n int64) error {
+	s.mu.Lock()
+	if s.size-s.cur >= n && s.waiters.Len() == 0 {
+		s.cur += n
+		s.mu.Unlock()
+		return nil
+	}
+
+	if n > s.size {
+		// Don't make other Acquire calls block on one that's doomed to fail.
+		s.mu.Unlock()
+		<-ctx.Done()
+		return ctx.Err()
+	}
+
+	ready := make(chan struct{})
+	w := waiter{n: n, ready: ready}
+	elem := s.waiters.PushBack(w)
+	s.mu.Unlock()
+
+	select {
+	case <-ctx.Done():
+		err := ctx.Err()
+		s.mu.Lock()
+		select {
+		case <-ready:
+			// Acquired the semaphore after we were canceled. Rather than trying to
+			// fix up the queue, just pretend we didn't notice the cancelation.
+			err = nil
+		default:
+			isFront := s.waiters.Front() == elem
+			s.waiters.Remove(elem)
+			// If we're at the front and there're extra tokens left, notify other waiters.
+			if isFront && s.size > s.cur {
+				s.notifyWaiters()
+			}
+		}
+		s.mu.Unlock()
+		return err
+
+	case <-ready:
+		return nil
+	}
+}
+
+// TryAcquire acquires the semaphore with a weight of n without blocking.
+// On success, returns true. On failure, returns false and leaves the semaphore unchanged.
+func (s *Weighted) TryAcquire(n int64) bool {
+	s.mu.Lock()
+	success := s.size-s.cur >= n && s.waiters.Len() == 0
+	if success {
+		s.cur += n
+	}
+	s.mu.Unlock()
+	return success
+}
+
+// Release releases the semaphore with a weight of n.
+func (s *Weighted) Release(n int64) {
+	s.mu.Lock()
+	s.cur -= n
+	if s.cur < 0 {
+		s.mu.Unlock()
+		panic("semaphore: released more than held")
+	}
+	s.notifyWaiters()
+	s.mu.Unlock()
+}
+
+func (s *Weighted) notifyWaiters() {
+	for {
+		next := s.waiters.Front()
+		if next == nil {
+			break // No more waiters blocked.
+		}
+
+		w := next.Value.(waiter)
+		if s.size-s.cur < w.n {
+			// Not enough tokens for the next waiter. We could keep going (to try to
+			// find a waiter with a smaller request), but under load that could cause
+			// starvation for large requests; instead, we leave all remaining waiters
+			// blocked.
+			//
+			// Consider a semaphore used as a read-write lock, with N tokens, N
+			// readers, and one writer. Each reader can Acquire(1) to obtain a read
+			// lock. The writer can Acquire(N) to obtain a write lock, excluding all
+			// of the readers. If we allow the readers to jump ahead in the queue,
+			// the writer will starve — there is always one token available for every
+			// reader.
+			break
+		}
+
+		s.cur += w.n
+		s.waiters.Remove(next)
+		close(w.ready)
+	}
+}
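
The newly vendored package is the standard weighted semaphore from golang.org/x/sync. A short usage sketch of its blocking Acquire contract, including the documented behavior that a canceled Acquire returns ctx.Err() and leaves the semaphore unchanged:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/semaphore"
)

func main() {
	sem := semaphore.NewWeighted(1)
	_ = sem.Acquire(context.Background(), 1) // holds the only unit

	// A second Acquire must wait; with a deadline it fails with ctx.Err()
	// and the semaphore is left unchanged.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	fmt.Println(sem.Acquire(ctx, 1)) // context deadline exceeded

	sem.Release(1)
	fmt.Println(sem.TryAcquire(1)) // true: the unit became available again
}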
vendor/google.golang.org/api/internal/version.go (generated, vendored, 2 changes)
@@ -5,4 +5,4 @@
 package internal

 // Version is the current tagged release of the library.
-const Version = "0.132.0"
+const Version = "0.134.0"
vendor/google.golang.org/grpc/attributes/attributes.go (generated, vendored, 28 changes)
@@ -112,19 +112,31 @@ func (a *Attributes) String() string {
 	sb.WriteString("{")
 	first := true
 	for k, v := range a.m {
-		var key, val string
-		if str, ok := k.(interface{ String() string }); ok {
-			key = str.String()
-		}
-		if str, ok := v.(interface{ String() string }); ok {
-			val = str.String()
-		}
 		if !first {
 			sb.WriteString(", ")
 		}
-		sb.WriteString(fmt.Sprintf("%q: %q, ", key, val))
+		sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v)))
 		first = false
 	}
 	sb.WriteString("}")
 	return sb.String()
 }

+func str(x interface{}) string {
+	if v, ok := x.(fmt.Stringer); ok {
+		return v.String()
+	} else if v, ok := x.(string); ok {
+		return v
+	}
+	return fmt.Sprintf("<%p>", x)
+}
+
+// MarshalJSON helps implement the json.Marshaler interface, thereby rendering
+// the Attributes correctly when printing (via pretty.JSON) structs containing
+// Attributes as fields.
+//
+// Is it impossible to unmarshal attributes from a JSON representation and this
+// method is meant only for debugging purposes.
+func (a *Attributes) MarshalJSON() ([]byte, error) {
+	return []byte(a.String()), nil
+}
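
The new str helper is a plain type-assertion fallback chain: fmt.Stringer first, then raw strings, then a pointer placeholder. A standalone sketch of the same chain; the id type is invented for the demo:

package main

import "fmt"

type id int

func (i id) String() string { return fmt.Sprintf("id-%d", int(i)) }

// Same fallback order as the vendored helper: prefer fmt.Stringer,
// then plain strings, then a pointer placeholder for anything else.
func str(x interface{}) string {
	if v, ok := x.(fmt.Stringer); ok {
		return v.String()
	} else if v, ok := x.(string); ok {
		return v
	}
	return fmt.Sprintf("<%p>", x)
}

func main() {
	fmt.Println(str(id(7)))       // id-7
	fmt.Println(str("plain"))     // plain
	fmt.Println(str(&struct{}{})) // an address placeholder such as <0xc000010000>
}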
vendor/google.golang.org/grpc/clientconn.go (generated, vendored, 91 changes)
@@ -37,6 +37,7 @@ import (
 	"google.golang.org/grpc/internal/backoff"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/pretty"
 	iresolver "google.golang.org/grpc/internal/resolver"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
@@ -867,6 +868,20 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi
 	cc.balancerWrapper.updateSubConnState(sc, s, err)
 }

+// Makes a copy of the input addresses slice and clears out the balancer
+// attributes field. Addresses are passed during subconn creation and address
+// update operations. In both cases, we will clear the balancer attributes by
+// calling this function, and therefore we will be able to use the Equal method
+// provided by the resolver.Address type for comparison.
+func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address {
+	out := make([]resolver.Address, len(in))
+	for i := range in {
+		out[i] = in[i]
+		out[i].BalancerAttributes = nil
+	}
+	return out
+}
+
 // newAddrConn creates an addrConn for addrs and adds it to cc.conns.
 //
 // Caller needs to make sure len(addrs) > 0.
@@ -874,7 +889,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
 	ac := &addrConn{
 		state:  connectivity.Idle,
 		cc:     cc,
-		addrs:  addrs,
+		addrs:  copyAddressesWithoutBalancerAttributes(addrs),
 		scopts: opts,
 		dopts:  cc.dopts,
 		czData: new(channelzData),
@@ -995,8 +1010,9 @@ func equalAddresses(a, b []resolver.Address) bool {
 // connections or connection attempts.
 func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
 	ac.mu.Lock()
-	channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
+	channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs))

+	addrs = copyAddressesWithoutBalancerAttributes(addrs)
 	if equalAddresses(ac.addrs, addrs) {
 		ac.mu.Unlock()
 		return
@@ -1807,19 +1823,70 @@ func (cc *ClientConn) parseTargetAndFindResolver() error {
 }

 // parseTarget uses RFC 3986 semantics to parse the given target into a
-// resolver.Target struct containing scheme, authority and url. Query
-// params are stripped from the endpoint.
+// resolver.Target struct containing url. Query params are stripped from the
+// endpoint.
 func parseTarget(target string) (resolver.Target, error) {
 	u, err := url.Parse(target)
 	if err != nil {
 		return resolver.Target{}, err
 	}

-	return resolver.Target{
-		Scheme:    u.Scheme,
-		Authority: u.Host,
-		URL:       *u,
-	}, nil
+	return resolver.Target{URL: *u}, nil
+}
+
+func encodeAuthority(authority string) string {
+	const upperhex = "0123456789ABCDEF"
+
+	// Return for characters that must be escaped as per
+	// Valid chars are mentioned here:
+	// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2
+	shouldEscape := func(c byte) bool {
+		// Alphanum are always allowed.
+		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+			return false
+		}
+		switch c {
+		case '-', '_', '.', '~': // Unreserved characters
+			return false
+		case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
+			return false
+		case ':', '[', ']', '@': // Authority related delimeters
+			return false
+		}
+		// Everything else must be escaped.
+		return true
+	}
+
+	hexCount := 0
+	for i := 0; i < len(authority); i++ {
+		c := authority[i]
+		if shouldEscape(c) {
+			hexCount++
+		}
+	}
+
+	if hexCount == 0 {
+		return authority
+	}
+
+	required := len(authority) + 2*hexCount
+	t := make([]byte, required)
+
+	j := 0
+	// This logic is a barebones version of escape in the go net/url library.
+	for i := 0; i < len(authority); i++ {
+		switch c := authority[i]; {
+		case shouldEscape(c):
+			t[j] = '%'
+			t[j+1] = upperhex[c>>4]
+			t[j+2] = upperhex[c&15]
+			j += 3
+		default:
+			t[j] = authority[i]
+			j++
+		}
+	}
+	return string(t)
 }

 // Determine channel authority. The order of precedence is as follows:
@@ -1872,7 +1939,11 @@ func (cc *ClientConn) determineAuthority() error {
 		// the channel authority given the user's dial target. For resolvers
 		// which don't implement this interface, we will use the endpoint from
 		// "scheme://authority/endpoint" as the default authority.
-		cc.authority = endpoint
+		// Escape the endpoint to handle use cases where the endpoint
+		// might not be a valid authority by default.
+		// For example an endpoint which has multiple paths like
+		// 'a/b/c', which is not a valid authority by default.
+		cc.authority = encodeAuthority(endpoint)
 	}
 	channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
 	return nil
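
encodeAuthority percent-encodes any byte that RFC 3986 does not allow in an authority, using uppercase hex. A condensed restatement of the same rule that compresses the switch into a character-set lookup; a sketch, not the vendored implementation:

package main

import (
	"fmt"
	"strings"
)

// Bytes outside RFC 3986's unreserved set, sub-delims, and the
// authority delimiters :, [, ], @ get percent-encoded.
func encodeAuthority(authority string) string {
	const upperhex = "0123456789ABCDEF"
	shouldEscape := func(c byte) bool {
		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
			return false
		}
		return !strings.ContainsRune("-._~!$&'()*+,;=:[]@", rune(c))
	}
	var b strings.Builder
	for i := 0; i < len(authority); i++ {
		c := authority[i]
		if shouldEscape(c) {
			b.WriteByte('%')
			b.WriteByte(upperhex[c>>4])
			b.WriteByte(upperhex[c&15])
		} else {
			b.WriteByte(c)
		}
	}
	return b.String()
}

func main() {
	// '/' is not valid in an authority, so a multi-path endpoint gets escaped,
	// which is the motivating case in the vendored comment.
	fmt.Println(encodeAuthority("a/b/c")) // a%2Fb%2Fc
}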
57 vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go generated vendored

@@ -25,8 +25,8 @@ import (
 	"fmt"
 	"io"
 	"net"
-	"sync"
 
+	"golang.org/x/sync/semaphore"
 	grpc "google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
@@ -35,15 +35,13 @@ import (
 	"google.golang.org/grpc/credentials/alts/internal/conn"
 	altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
 	altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
+	"google.golang.org/grpc/internal/envconfig"
 )
 
 const (
 	// The maximum byte size of receive frames.
 	frameLimit = 64 * 1024 // 64 KB
 	rekeyRecordProtocolName = "ALTSRP_GCM_AES128_REKEY"
-	// maxPendingHandshakes represents the maximum number of concurrent
-	// handshakes.
-	maxPendingHandshakes = 100
 )
 
 var (
@@ -59,9 +57,9 @@ var (
 			return conn.NewAES128GCMRekey(s, keyData)
 		},
 	}
-	// control number of concurrent created (but not closed) handshakers.
-	mu sync.Mutex
-	concurrentHandshakes = int64(0)
+	// control number of concurrent created (but not closed) handshakes.
+	clientHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes))
+	serverHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes))
 	// errDropped occurs when maxPendingHandshakes is reached.
 	errDropped = errors.New("maximum number of concurrent ALTS handshakes is reached")
 	// errOutOfBound occurs when the handshake service returns a consumed
@@ -77,30 +75,6 @@ func init() {
 	}
 }
 
-func acquire() bool {
-	mu.Lock()
-	// If we need n to be configurable, we can pass it as an argument.
-	n := int64(1)
-	success := maxPendingHandshakes-concurrentHandshakes >= n
-	if success {
-		concurrentHandshakes += n
-	}
-	mu.Unlock()
-	return success
-}
-
-func release() {
-	mu.Lock()
-	// If we need n to be configurable, we can pass it as an argument.
-	n := int64(1)
-	concurrentHandshakes -= n
-	if concurrentHandshakes < 0 {
-		mu.Unlock()
-		panic("bad release")
-	}
-	mu.Unlock()
-}
-
 // ClientHandshakerOptions contains the client handshaker options that can
 // provided by the caller.
 type ClientHandshakerOptions struct {
@@ -134,10 +108,6 @@ func DefaultServerHandshakerOptions() *ServerHandshakerOptions {
 	return &ServerHandshakerOptions{}
 }
 
-// TODO: add support for future local and remote endpoint in both client options
-// and server options (server options struct does not exist now. When
-// caller can provide endpoints, it should be created.
-
 // altsHandshaker is used to complete an ALTS handshake between client and
 // server. This handshaker talks to the ALTS handshaker service in the metadata
 // server.
@@ -185,10 +155,10 @@ func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn,
 // ClientHandshake starts and completes a client ALTS handshake for GCP. Once
 // done, ClientHandshake returns a secure connection.
 func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
-	if !acquire() {
+	if !clientHandshakes.TryAcquire(1) {
 		return nil, nil, errDropped
 	}
-	defer release()
+	defer clientHandshakes.Release(1)
 
 	if h.side != core.ClientSide {
 		return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshaker")
@@ -238,10 +208,10 @@ func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credent
 // ServerHandshake starts and completes a server ALTS handshake for GCP. Once
 // done, ServerHandshake returns a secure connection.
 func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
-	if !acquire() {
+	if !serverHandshakes.TryAcquire(1) {
 		return nil, nil, errDropped
 	}
-	defer release()
+	defer serverHandshakes.Release(1)
 
 	if h.side != core.ServerSide {
 		return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshaker")
@@ -264,8 +234,6 @@ func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credent
 	}
 
 	// Prepare server parameters.
-	// TODO: currently only ALTS parameters are provided. Might need to use
-	// more options in the future.
 	params := make(map[int32]*altspb.ServerHandshakeParameters)
 	params[int32(altspb.HandshakeProtocol_ALTS)] = &altspb.ServerHandshakeParameters{
 		RecordProtocols: recordProtocols,
@@ -391,3 +359,10 @@ func (h *altsHandshaker) Close() {
 		h.stream.CloseSend()
 	}
 }
+
+// ResetConcurrentHandshakeSemaphoreForTesting resets the handshake semaphores
+// to allow numberOfAllowedHandshakes concurrent handshakes each.
+func ResetConcurrentHandshakeSemaphoreForTesting(numberOfAllowedHandshakes int64) {
+	clientHandshakes = semaphore.NewWeighted(numberOfAllowedHandshakes)
+	serverHandshakes = semaphore.NewWeighted(numberOfAllowedHandshakes)
+}
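The change above swaps the hand-rolled mutex-plus-counter limiter for golang.org/x/sync/semaphore, with separate client and server semaphores. For readers skimming this vendor bump, here is a minimal standalone sketch of the same TryAcquire/Release gating; the names maxConcurrent-style literal, doHandshake and the main function are illustrative, not part of the diff:

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sync/semaphore"
)

// Illustrative limit; the vendored code reads it from
// GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES (default 100, clamped to [1, 100]).
var handshakes = semaphore.NewWeighted(100)

var errDropped = errors.New("maximum number of concurrent handshakes is reached")

// doHandshake is a hypothetical stand-in for ClientHandshake/ServerHandshake.
func doHandshake() error {
	// TryAcquire never blocks: it fails fast instead of queueing, which is
	// exactly the errDropped behavior of the removed acquire()/release() pair.
	if !handshakes.TryAcquire(1) {
		return errDropped
	}
	defer handshakes.Release(1)
	// ... perform the actual handshake here ...
	return nil
}

func main() {
	fmt.Println(doHandshake())
}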
23 vendor/google.golang.org/grpc/dialoptions.go generated vendored

@@ -78,6 +78,7 @@ type dialOptions struct {
 	defaultServiceConfigRawJSON *string
 	resolvers []resolver.Builder
 	idleTimeout time.Duration
+	recvBufferPool SharedBufferPool
 }
 
 // DialOption configures how we set up the connection.
@@ -628,6 +629,7 @@ func defaultDialOptions() dialOptions {
 			ReadBufferSize: defaultReadBufSize,
 			UseProxy: true,
 		},
+		recvBufferPool: nopBufferPool{},
 	}
 }
 
@@ -676,3 +678,24 @@ func WithIdleTimeout(d time.Duration) DialOption {
 		o.idleTimeout = d
 	})
 }
+
+// WithRecvBufferPool returns a DialOption that configures the ClientConn
+// to use the provided shared buffer pool for parsing incoming messages. Depending
+// on the application's workload, this could result in reduced memory allocation.
+//
+// If you are unsure about how to implement a memory pool but want to utilize one,
+// begin with grpc.NewSharedBufferPool.
+//
+// Note: The shared buffer pool feature will not be active if any of the following
+// options are used: WithStatsHandler, EnableTracing, or binary logging. In such
+// cases, the shared buffer pool will be ignored.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.recvBufferPool = bufferPool
+	})
+}
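As the doc comment says, the new option is wired up at dial time. A minimal sketch of client-side usage; the target address and the use of insecure credentials are illustrative, not taken from this commit:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Illustrative target; any gRPC server address works here.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Reuse receive buffers across RPCs on this ClientConn.
		grpc.WithRecvBufferPool(grpc.NewSharedBufferPool()),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}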
3 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go generated vendored

@@ -40,6 +40,9 @@ var (
 	// pick_first LB policy, which can be enabled by setting the environment
 	// variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true".
 	PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false)
+	// ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
+	// handshakes that can be performed.
+	ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
 )
 
 func boolFromEnv(envVar string, def bool) bool {
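uint64FromEnv itself is not shown in this hunk; judging from the call site, its semantics are "read an unsigned integer from the environment, fall back to a default, and clamp to [min, max]", evaluated once at package init. The sketch below is an assumption about that behavior, not the vendored implementation:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// uint64FromEnvSketch mirrors what the call
// uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
// appears to do; the real helper lives elsewhere in envconfig.go.
func uint64FromEnvSketch(envVar string, def, min, max uint64) uint64 {
	v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
	if err != nil {
		return def // unset or unparsable: use the default
	}
	if v < min {
		return min
	}
	if v > max {
		return max
	}
	return v
}

func main() {
	os.Setenv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", "25")
	fmt.Println(uint64FromEnvSketch("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)) // 25
}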
7 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go generated vendored

@@ -80,6 +80,13 @@ func Uint32() uint32 {
 	return r.Uint32()
 }
 
+// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source.
+func ExpFloat64() float64 {
+	mu.Lock()
+	defer mu.Unlock()
+	return r.ExpFloat64()
+}
+
 // Shuffle implements rand.Shuffle on the grpcrand global source.
 var Shuffle = func(n int, f func(int, int)) {
 	mu.Lock()
136 vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go generated vendored Normal file

@@ -0,0 +1,136 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcsync
+
+import (
+	"context"
+	"sync"
+)
+
+// Subscriber represents an entity that is subscribed to messages published on
+// a PubSub. It wraps the callback to be invoked by the PubSub when a new
+// message is published.
+type Subscriber interface {
+	// OnMessage is invoked when a new message is published. Implementations
+	// must not block in this method.
+	OnMessage(msg interface{})
+}
+
+// PubSub is a simple one-to-many publish-subscribe system that supports
+// messages of arbitrary type. It guarantees that messages are delivered in
+// the same order in which they were published.
+//
+// Publisher invokes the Publish() method to publish new messages, while
+// subscribers interested in receiving these messages register a callback
+// via the Subscribe() method.
+//
+// Once a PubSub is stopped, no more messages can be published, and
+// it is guaranteed that no more subscriber callback will be invoked.
+type PubSub struct {
+	cs     *CallbackSerializer
+	cancel context.CancelFunc
+
+	// Access to the below fields are guarded by this mutex.
+	mu          sync.Mutex
+	msg         interface{}
+	subscribers map[Subscriber]bool
+	stopped     bool
+}
+
+// NewPubSub returns a new PubSub instance.
+func NewPubSub() *PubSub {
+	ctx, cancel := context.WithCancel(context.Background())
+	return &PubSub{
+		cs:          NewCallbackSerializer(ctx),
+		cancel:      cancel,
+		subscribers: map[Subscriber]bool{},
+	}
+}
+
+// Subscribe registers the provided Subscriber to the PubSub.
+//
+// If the PubSub contains a previously published message, the Subscriber's
+// OnMessage() callback will be invoked asynchronously with the existing
+// message to begin with, and subsequently for every newly published message.
+//
+// The caller is responsible for invoking the returned cancel function to
+// unsubscribe itself from the PubSub.
+func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
+	ps.mu.Lock()
+	defer ps.mu.Unlock()
+
+	if ps.stopped {
+		return func() {}
+	}
+
+	ps.subscribers[sub] = true
+
+	if ps.msg != nil {
+		msg := ps.msg
+		ps.cs.Schedule(func(context.Context) {
+			ps.mu.Lock()
+			defer ps.mu.Unlock()
+			if !ps.subscribers[sub] {
+				return
+			}
+			sub.OnMessage(msg)
+		})
+	}
+
+	return func() {
+		ps.mu.Lock()
+		defer ps.mu.Unlock()
+		delete(ps.subscribers, sub)
+	}
+}
+
+// Publish publishes the provided message to the PubSub, and invokes
+// callbacks registered by subscribers asynchronously.
+func (ps *PubSub) Publish(msg interface{}) {
+	ps.mu.Lock()
+	defer ps.mu.Unlock()
+
+	if ps.stopped {
+		return
+	}
+
+	ps.msg = msg
+	for sub := range ps.subscribers {
+		s := sub
+		ps.cs.Schedule(func(context.Context) {
+			ps.mu.Lock()
+			defer ps.mu.Unlock()
+			if !ps.subscribers[s] {
+				return
+			}
+			s.OnMessage(msg)
+		})
+	}
+}
+
+// Stop shuts down the PubSub and releases any resources allocated by it.
+// It is guaranteed that no subscriber callbacks would be invoked once this
+// method returns.
+func (ps *PubSub) Stop() {
+	ps.mu.Lock()
+	defer ps.mu.Unlock()
+	ps.stopped = true
+
+	ps.cancel()
+}
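grpcsync is an internal package, so this type cannot be imported from application code. To show the call pattern anyway, the sketch below is a deliberately simplified stand-in (miniPubSub and printSubscriber are illustrative): it keeps the replay-last-message and fan-out semantics, but invokes callbacks synchronously under the lock instead of scheduling them on a CallbackSerializer as the real PubSub does.

package main

import (
	"fmt"
	"sync"
)

// Subscriber mirrors the interface added in grpcsync/pubsub.go.
type Subscriber interface {
	OnMessage(msg interface{})
}

// printSubscriber is an illustrative Subscriber; not part of the diff.
type printSubscriber struct{ name string }

func (s printSubscriber) OnMessage(msg interface{}) {
	fmt.Printf("%s got %v\n", s.name, msg)
}

// miniPubSub is a simplified stand-in for grpcsync.PubSub.
type miniPubSub struct {
	mu          sync.Mutex
	msg         interface{}
	subscribers map[Subscriber]bool
}

func newMiniPubSub() *miniPubSub {
	return &miniPubSub{subscribers: map[Subscriber]bool{}}
}

func (ps *miniPubSub) Subscribe(sub Subscriber) (cancel func()) {
	ps.mu.Lock()
	defer ps.mu.Unlock()
	ps.subscribers[sub] = true
	if ps.msg != nil {
		sub.OnMessage(ps.msg) // replay the latest message, as PubSub does
	}
	return func() {
		ps.mu.Lock()
		defer ps.mu.Unlock()
		delete(ps.subscribers, sub)
	}
}

func (ps *miniPubSub) Publish(msg interface{}) {
	ps.mu.Lock()
	defer ps.mu.Unlock()
	ps.msg = msg
	for sub := range ps.subscribers {
		sub.OnMessage(msg)
	}
}

func main() {
	ps := newMiniPubSub()
	cancel := ps.Subscribe(printSubscriber{name: "a"})
	ps.Publish("state-1")
	ps.Subscribe(printSubscriber{name: "b"}) // immediately sees "state-1"
	ps.Publish("state-2")
	cancel()
}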
74 vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go generated vendored

@@ -62,7 +62,8 @@ const (
 	defaultPort = "443"
 	defaultDNSSvrPort = "53"
 	golang = "GO"
-	// txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
+	// txtPrefix is the prefix string to be prepended to the host name for txt
+	// record lookup.
 	txtPrefix = "_grpc_config."
 	// In DNS, service config is encoded in a TXT record via the mechanism
 	// described in RFC-1464 using the attribute name grpc_config.
@@ -86,14 +87,14 @@ var (
 	minDNSResRate = 30 * time.Second
 )
 
-var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
-	return func(ctx context.Context, network, address string) (net.Conn, error) {
+var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) {
+	return func(ctx context.Context, network, _ string) (net.Conn, error) {
 		var dialer net.Dialer
-		return dialer.DialContext(ctx, network, authority)
+		return dialer.DialContext(ctx, network, address)
 	}
 }
 
-var customAuthorityResolver = func(authority string) (netResolver, error) {
+var newNetResolver = func(authority string) (netResolver, error) {
 	host, port, err := parseTarget(authority, defaultDNSSvrPort)
 	if err != nil {
 		return nil, err
@@ -103,7 +104,7 @@ var customAuthorityResolver = func(authority string) (netResolver, error) {
 
 	return &net.Resolver{
 		PreferGo: true,
-		Dial: customAuthorityDialler(authorityWithPort),
+		Dial: addressDialer(authorityWithPort),
 	}, nil
 }
 
@@ -114,7 +115,8 @@ func NewBuilder() resolver.Builder {
 
 type dnsBuilder struct{}
 
-// Build creates and starts a DNS resolver that watches the name resolution of the target.
+// Build creates and starts a DNS resolver that watches the name resolution of
+// the target.
 func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
 	host, port, err := parseTarget(target.Endpoint(), defaultPort)
 	if err != nil {
@@ -143,7 +145,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
 	if target.URL.Host == "" {
 		d.resolver = defaultResolver
 	} else {
-		d.resolver, err = customAuthorityResolver(target.URL.Host)
+		d.resolver, err = newNetResolver(target.URL.Host)
 		if err != nil {
 			return nil, err
 		}
@@ -180,19 +182,22 @@ type dnsResolver struct {
 	ctx context.Context
 	cancel context.CancelFunc
 	cc resolver.ClientConn
-	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
+	// rn channel is used by ResolveNow() to force an immediate resolution of the
+	// target.
 	rn chan struct{}
-	// wg is used to enforce Close() to return after the watcher() goroutine has finished.
-	// Otherwise, data race will be possible. [Race Example] in dns_resolver_test we
-	// replace the real lookup functions with mocked ones to facilitate testing.
-	// If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes
-	// will warns lookup (READ the lookup function pointers) inside watcher() goroutine
-	// has data race with replaceNetFunc (WRITE the lookup function pointers).
+	// wg is used to enforce Close() to return after the watcher() goroutine has
+	// finished. Otherwise, data race will be possible. [Race Example] in
+	// dns_resolver_test we replace the real lookup functions with mocked ones to
+	// facilitate testing. If Close() doesn't wait for watcher() goroutine
+	// finishes, race detector sometimes will warns lookup (READ the lookup
+	// function pointers) inside watcher() goroutine has data race with
+	// replaceNetFunc (WRITE the lookup function pointers).
 	wg sync.WaitGroup
 	disableServiceConfig bool
 }
 
-// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
+// ResolveNow invoke an immediate resolution of the target that this
+// dnsResolver watches.
 func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
 	select {
 	case d.rn <- struct{}{}:
@@ -220,8 +225,8 @@ func (d *dnsResolver) watcher() {
 
 		var timer *time.Timer
 		if err == nil {
-			// Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least
-			// to prevent constantly re-resolving.
+			// Success resolving, wait for the next ResolveNow. However, also wait 30
+			// seconds at the very least to prevent constantly re-resolving.
 			backoffIndex = 1
 			timer = newTimerDNSResRate(minDNSResRate)
 			select {
@@ -231,7 +236,8 @@ func (d *dnsResolver) watcher() {
 			case <-d.rn:
 			}
 		} else {
-			// Poll on an error found in DNS Resolver or an error received from ClientConn.
+			// Poll on an error found in DNS Resolver or an error received from
+			// ClientConn.
 			timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
 			backoffIndex++
 		}
@@ -278,7 +284,8 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
 }
 
 func handleDNSError(err error, lookupType string) error {
-	if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
+	dnsErr, ok := err.(*net.DNSError)
+	if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
 		// Timeouts and temporary errors should be communicated to gRPC to
 		// attempt another DNS query (with backoff). Other errors should be
 		// suppressed (they may represent the absence of a TXT record).
@@ -307,10 +314,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
 		res += s
 	}
 
-	// TXT record must have "grpc_config=" attribute in order to be used as service config.
+	// TXT record must have "grpc_config=" attribute in order to be used as
+	// service config.
 	if !strings.HasPrefix(res, txtAttribute) {
 		logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
-		// This is not an error; it is the equivalent of not having a service config.
+		// This is not an error; it is the equivalent of not having a service
+		// config.
 		return nil
 	}
 	sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
@@ -352,9 +361,10 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
 	return &state, nil
 }
 
-// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
-// If addr is an IPv4 address, return the addr and ok = true.
-// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
+// formatIP returns ok = false if addr is not a valid textual representation of
+// an IP address. If addr is an IPv4 address, return the addr and ok = true.
+// If addr is an IPv6 address, return the addr enclosed in square brackets and
+// ok = true.
 func formatIP(addr string) (addrIP string, ok bool) {
 	ip := net.ParseIP(addr)
 	if ip == nil {
@@ -366,10 +376,10 @@ func formatIP(addr string) (addrIP string, ok bool) {
 	return "[" + addr + "]", true
 }
 
-// parseTarget takes the user input target string and default port, returns formatted host and port info.
-// If target doesn't specify a port, set the port to be the defaultPort.
-// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
-// are stripped when setting the host.
+// parseTarget takes the user input target string and default port, returns
+// formatted host and port info. If target doesn't specify a port, set the port
+// to be the defaultPort. If target is in IPv6 format and host-name is enclosed
+// in square brackets, brackets are stripped when setting the host.
 // examples:
 // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443"
 // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80"
@@ -385,12 +395,14 @@ func parseTarget(target, defaultPort string) (host, port string, err error) {
 	}
 	if host, port, err = net.SplitHostPort(target); err == nil {
 		if port == "" {
-			// If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error.
+			// If the port field is empty (target ends with colon), e.g. "[::1]:",
+			// this is an error.
			return "", "", errEndsWithColon
 		}
 		// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
 		if host == "" {
-			// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
+			// Keep consistent with net.Dial(): If the host is empty, as in ":80",
+			// the local system is assumed.
 			host = "localhost"
 		}
 		return host, port, nil
2 vendor/google.golang.org/grpc/internal/transport/http2_server.go generated vendored

@@ -238,7 +238,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
 		kp.Timeout = defaultServerKeepaliveTimeout
 	}
 	if kp.Time != infinity {
-		if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
+		if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil {
 			return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
 		}
 	}
8 vendor/google.golang.org/grpc/resolver/resolver.go generated vendored

@@ -142,6 +142,10 @@ type Address struct {
 
 // Equal returns whether a and o are identical. Metadata is compared directly,
 // not with any recursive introspection.
+//
+// This method compares all fields of the address. When used to tell apart
+// addresses during subchannel creation or connection establishment, it might be
+// more appropriate for the caller to implement custom equality logic.
 func (a Address) Equal(o Address) bool {
 	return a.Addr == o.Addr && a.ServerName == o.ServerName &&
 		a.Attributes.Equal(o.Attributes) &&
@@ -264,10 +268,6 @@ type ClientConn interface {
 //   - "unknown_scheme://authority/endpoint"
 //     Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
 type Target struct {
-	// Deprecated: use URL.Scheme instead.
-	Scheme string
-	// Deprecated: use URL.Host instead.
-	Authority string
 	// URL contains the parsed dial target with an optional default scheme added
 	// to it if the original dial target contained no scheme or contained an
 	// unregistered scheme. Any query params specified in the original dial
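Removing the long-deprecated Scheme and Authority fields means custom resolver implementations must read those values from Target.URL instead. A minimal sketch of the migration; constructing a Target by hand is illustrative (gRPC normally builds it from the dial target string):

package main

import (
	"fmt"
	"net/url"

	"google.golang.org/grpc/resolver"
)

func main() {
	u, _ := url.Parse("dns://8.8.8.8/example.com:443")
	target := resolver.Target{URL: *u}

	// Before grpc v1.57: target.Scheme and target.Authority.
	// From v1.57 on: go through the parsed URL.
	fmt.Println(target.URL.Scheme) // "dns"
	fmt.Println(target.URL.Host)   // "8.8.8.8" (the authority)
	fmt.Println(target.Endpoint()) // "example.com:443"
}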
27 vendor/google.golang.org/grpc/rpc_util.go generated vendored

@@ -577,6 +577,9 @@ type parser struct {
 	// The header of a gRPC message. Find more detail at
 	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
 	header [5]byte
+
+	// recvBufferPool is the pool of shared receive buffers.
+	recvBufferPool SharedBufferPool
 }
 
 // recvMsg reads a complete gRPC message from the stream.
@@ -610,9 +613,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
 	if int(length) > maxReceiveMessageSize {
 		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
 	}
-	// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
-	// of making it for each message:
-	msg = make([]byte, int(length))
+	msg = p.recvBufferPool.Get(int(length))
 	if _, err := p.r.Read(msg); err != nil {
 		if err == io.EOF {
 			err = io.ErrUnexpectedEOF
@@ -726,12 +727,12 @@ type payloadInfo struct {
 }
 
 func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) {
-	pf, d, err := p.recvMsg(maxReceiveMessageSize)
+	pf, buf, err := p.recvMsg(maxReceiveMessageSize)
 	if err != nil {
 		return nil, err
 	}
 	if payInfo != nil {
-		payInfo.compressedLength = len(d)
+		payInfo.compressedLength = len(buf)
 	}
 
 	if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
@@ -743,10 +744,10 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
 	// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
 	// use this decompressor as the default.
 	if dc != nil {
-		d, err = dc.Do(bytes.NewReader(d))
-		size = len(d)
+		buf, err = dc.Do(bytes.NewReader(buf))
+		size = len(buf)
 	} else {
-		d, size, err = decompress(compressor, d, maxReceiveMessageSize)
+		buf, size, err = decompress(compressor, buf, maxReceiveMessageSize)
 	}
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
@@ -757,7 +758,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
 			return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
 		}
 	}
-	return d, nil
+	return buf, nil
 }
 
 // Using compressor, decompress d, returning data and size.
@@ -792,15 +793,17 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize
 // dc takes precedence over compressor.
 // TODO(dfawley): wrap the old compressor/decompressor using the new API?
 func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
-	d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
+	buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
 	if err != nil {
 		return err
 	}
-	if err := c.Unmarshal(d, m); err != nil {
+	if err := c.Unmarshal(buf, m); err != nil {
 		return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
 	}
 	if payInfo != nil {
-		payInfo.uncompressedBytes = d
+		payInfo.uncompressedBytes = buf
+	} else {
+		p.recvBufferPool.Put(&buf)
 	}
 	return nil
 }
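The net effect of the parser change is a get/use/put lifecycle for receive buffers: recvMsg draws a buffer from the pool, and recv returns it after unmarshalling, unless the bytes escape into payInfo for stats or binary logging. A condensed sketch of that lifecycle under those assumptions (pool, nopPool, readMessage and keepForStats are illustrative simplifications, not the vendored code path):

package main

import "fmt"

// pool matches the Get/Put shape of grpc.SharedBufferPool.
type pool interface {
	Get(length int) []byte
	Put(*[]byte)
}

// nopPool mirrors nopBufferPool: allocate fresh, discard on Put.
type nopPool struct{}

func (nopPool) Get(length int) []byte { return make([]byte, length) }
func (nopPool) Put(*[]byte)           {}

// readMessage sketches the parser lifecycle: the buffer is pooled only
// when it does not escape (keepForStats stands in for payInfo != nil).
func readMessage(p pool, length int, keepForStats bool) {
	buf := p.Get(length) // recvMsg: msg = p.recvBufferPool.Get(int(length))
	// ... fill buf from the wire and unmarshal it ...
	if !keepForStats {
		p.Put(&buf) // recv: p.recvBufferPool.Put(&buf)
	}
}

func main() {
	readMessage(nopPool{}, 64, false)
	fmt.Println("buffer returned to pool")
}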
27 vendor/google.golang.org/grpc/server.go generated vendored

@@ -174,6 +174,7 @@ type serverOptions struct {
 	maxHeaderListSize *uint32
 	headerTableSize *uint32
 	numServerWorkers uint32
+	recvBufferPool SharedBufferPool
 }
 
 var defaultServerOptions = serverOptions{
@@ -182,6 +183,7 @@ var defaultServerOptions = serverOptions{
 	connectionTimeout: 120 * time.Second,
 	writeBufferSize: defaultWriteBufSize,
 	readBufferSize: defaultReadBufSize,
+	recvBufferPool: nopBufferPool{},
 }
 var globalServerOptions []ServerOption
 
@@ -552,6 +554,27 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
 	})
 }
 
+// RecvBufferPool returns a ServerOption that configures the server
+// to use the provided shared buffer pool for parsing incoming messages. Depending
+// on the application's workload, this could result in reduced memory allocation.
+//
+// If you are unsure about how to implement a memory pool but want to utilize one,
+// begin with grpc.NewSharedBufferPool.
+//
+// Note: The shared buffer pool feature will not be active if any of the following
+// options are used: StatsHandler, EnableTracing, or binary logging. In such
+// cases, the shared buffer pool will be ignored.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.recvBufferPool = bufferPool
+	})
+}
+
 // serverWorkerResetThreshold defines how often the stack must be reset. Every
 // N requests, by spawning a new goroutine in its place, a worker can reset its
 // stack so that large stacks don't live in memory forever. 2^16 should allow
@@ -1296,7 +1319,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 	if len(shs) != 0 || len(binlogs) != 0 {
 		payInfo = &payloadInfo{}
 	}
-	d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
+	d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
 	if err != nil {
 		if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
 			channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
@@ -1506,7 +1529,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 		ctx: ctx,
 		t: t,
 		s: stream,
-		p: &parser{r: stream},
+		p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool},
 		codec: s.getCodec(stream.ContentSubtype()),
 		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
 		maxSendMessageSize: s.opts.maxSendMessageSize,
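Server-side wiring mirrors the dial option: every stream's parser then draws its receive buffers from the configured pool. A minimal sketch, assuming service registration happens elsewhere (the listener address is illustrative):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051") // illustrative address
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	// Share receive buffers across all RPCs handled by this server.
	srv := grpc.NewServer(grpc.RecvBufferPool(grpc.NewSharedBufferPool()))
	// ... register services here ...
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}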
154 vendor/google.golang.org/grpc/shared_buffer_pool.go generated vendored Normal file

@@ -0,0 +1,154 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import "sync"
+
+// SharedBufferPool is a pool of buffers that can be shared, resulting in
+// decreased memory allocation. Currently, in gRPC-go, it is only utilized
+// for parsing incoming messages.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type SharedBufferPool interface {
+	// Get returns a buffer with specified length from the pool.
+	//
+	// The returned byte slice may be not zero initialized.
+	Get(length int) []byte
+
+	// Put returns a buffer to the pool.
+	Put(*[]byte)
+}
+
+// NewSharedBufferPool creates a simple SharedBufferPool with buckets
+// of different sizes to optimize memory usage. This prevents the pool from
+// wasting large amounts of memory, even when handling messages of varying sizes.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NewSharedBufferPool() SharedBufferPool {
+	return &simpleSharedBufferPool{
+		pools: [poolArraySize]simpleSharedBufferChildPool{
+			newBytesPool(level0PoolMaxSize),
+			newBytesPool(level1PoolMaxSize),
+			newBytesPool(level2PoolMaxSize),
+			newBytesPool(level3PoolMaxSize),
+			newBytesPool(level4PoolMaxSize),
+			newBytesPool(0),
+		},
+	}
+}
+
+// simpleSharedBufferPool is a simple implementation of SharedBufferPool.
+type simpleSharedBufferPool struct {
+	pools [poolArraySize]simpleSharedBufferChildPool
+}
+
+func (p *simpleSharedBufferPool) Get(size int) []byte {
+	return p.pools[p.poolIdx(size)].Get(size)
+}
+
+func (p *simpleSharedBufferPool) Put(bs *[]byte) {
+	p.pools[p.poolIdx(cap(*bs))].Put(bs)
+}
+
+func (p *simpleSharedBufferPool) poolIdx(size int) int {
+	switch {
+	case size <= level0PoolMaxSize:
+		return level0PoolIdx
+	case size <= level1PoolMaxSize:
+		return level1PoolIdx
+	case size <= level2PoolMaxSize:
+		return level2PoolIdx
+	case size <= level3PoolMaxSize:
+		return level3PoolIdx
+	case size <= level4PoolMaxSize:
+		return level4PoolIdx
+	default:
+		return levelMaxPoolIdx
+	}
+}
+
+const (
+	level0PoolMaxSize = 16                     //  16 B
+	level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B
+	level2PoolMaxSize = level1PoolMaxSize * 16 //   4 KB
+	level3PoolMaxSize = level2PoolMaxSize * 16 //  64 KB
+	level4PoolMaxSize = level3PoolMaxSize * 16 //   1 MB
+)
+
+const (
+	level0PoolIdx = iota
+	level1PoolIdx
+	level2PoolIdx
+	level3PoolIdx
+	level4PoolIdx
+	levelMaxPoolIdx
+	poolArraySize
+)
+
+type simpleSharedBufferChildPool interface {
+	Get(size int) []byte
+	Put(interface{})
+}
+
+type bufferPool struct {
+	sync.Pool
+
+	defaultSize int
+}
+
+func (p *bufferPool) Get(size int) []byte {
+	bs := p.Pool.Get().(*[]byte)
+
+	if cap(*bs) < size {
+		p.Pool.Put(bs)
+
+		return make([]byte, size)
+	}
+
+	return (*bs)[:size]
+}
+
+func newBytesPool(size int) simpleSharedBufferChildPool {
+	return &bufferPool{
+		Pool: sync.Pool{
+			New: func() interface{} {
+				bs := make([]byte, size)
+				return &bs
+			},
+		},
+		defaultSize: size,
+	}
+}
+
+// nopBufferPool is a buffer pool just makes new buffer without pooling.
+type nopBufferPool struct {
+}
+
+func (nopBufferPool) Get(length int) []byte {
+	return make([]byte, length)
+}
+
+func (nopBufferPool) Put(*[]byte) {
+}
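The bucket sizes grow by a factor of 16, so poolIdx is effectively a base-16 size-class lookup: a 100-byte message draws from the 256 B bucket, a 5 KB message from the 64 KB bucket, and anything above 1 MB from the unbounded class. Because SharedBufferPool is a public interface, callers can also plug in their own policy; below is a hedged sketch of a trivial single-bucket implementation (fixedPool is illustrative, not part of gRPC) that trades the memory efficiency of the size classes for simplicity:

package main

import (
	"fmt"
	"sync"

	"google.golang.org/grpc"
)

// fixedPool is an illustrative SharedBufferPool with one sync.Pool bucket.
type fixedPool struct{ p sync.Pool }

func newFixedPool() *fixedPool {
	return &fixedPool{p: sync.Pool{New: func() interface{} {
		bs := make([]byte, 0, 4096) // single 4 KB size class
		return &bs
	}}}
}

func (f *fixedPool) Get(length int) []byte {
	bs := f.p.Get().(*[]byte)
	if cap(*bs) < length {
		f.p.Put(bs)
		return make([]byte, length) // too big for the bucket: allocate
	}
	return (*bs)[:length]
}

func (f *fixedPool) Put(bs *[]byte) { f.p.Put(bs) }

// Compile-time check that fixedPool satisfies grpc.SharedBufferPool.
var _ grpc.SharedBufferPool = (*fixedPool)(nil)

func main() {
	var pool grpc.SharedBufferPool = newFixedPool()
	buf := pool.Get(100)
	fmt.Println(len(buf), cap(buf)) // 100 4096
	pool.Put(&buf)
}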
4 vendor/google.golang.org/grpc/stream.go generated vendored

@@ -507,7 +507,7 @@ func (a *csAttempt) newStream() error {
 		return toRPCErr(nse.Err)
 	}
 	a.s = s
-	a.p = &parser{r: s}
+	a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool}
 	return nil
 }
 
@@ -1270,7 +1270,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
 		return nil, err
 	}
 	as.s = s
-	as.p = &parser{r: s}
+	as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool}
 	ac.incrCallsStarted()
 	if desc != unaryStreamDesc {
 		// Listen on stream context to cleanup when the stream context is
2 vendor/google.golang.org/grpc/version.go generated vendored

@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.56.2"
+const Version = "1.57.0"
33 vendor/modules.txt vendored

@@ -4,7 +4,7 @@ cloud.google.com/go/internal
 cloud.google.com/go/internal/optional
 cloud.google.com/go/internal/trace
 cloud.google.com/go/internal/version
-# cloud.google.com/go/compute v1.22.0
+# cloud.google.com/go/compute v1.23.0
 ## explicit; go 1.19
 cloud.google.com/go/compute/internal
 # cloud.google.com/go/compute/metadata v0.2.3
@@ -109,7 +109,7 @@ github.com/VividCortex/ewma
 # github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
 ## explicit; go 1.15
 github.com/alecthomas/units
-# github.com/aws/aws-sdk-go v1.44.302
+# github.com/aws/aws-sdk-go v1.44.309
 ## explicit; go 1.11
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/auth/bearer
@@ -180,10 +180,10 @@ github.com/aws/aws-sdk-go-v2/internal/timeconv
 ## explicit; go 1.15
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi
-# github.com/aws/aws-sdk-go-v2/config v1.18.28
+# github.com/aws/aws-sdk-go-v2/config v1.18.29
 ## explicit; go 1.15
 github.com/aws/aws-sdk-go-v2/config
-# github.com/aws/aws-sdk-go-v2/credentials v1.13.27
+# github.com/aws/aws-sdk-go-v2/credentials v1.13.28
 ## explicit; go 1.15
 github.com/aws/aws-sdk-go-v2/credentials
 github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds
@@ -196,7 +196,7 @@ github.com/aws/aws-sdk-go-v2/credentials/stscreds
 ## explicit; go 1.15
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config
-# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.72
+# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.73
 ## explicit; go 1.15
 github.com/aws/aws-sdk-go-v2/feature/s3/manager
 # github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.35
@@ -244,7 +244,7 @@ github.com/aws/aws-sdk-go-v2/service/sso/types
 github.com/aws/aws-sdk-go-v2/service/ssooidc
 github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints
 github.com/aws/aws-sdk-go-v2/service/ssooidc/types
-# github.com/aws/aws-sdk-go-v2/service/sts v1.19.3
+# github.com/aws/aws-sdk-go-v2/service/sts v1.20.0
 ## explicit; go 1.15
 github.com/aws/aws-sdk-go-v2/service/sts
 github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints
@@ -286,7 +286,7 @@ github.com/cheggaaa/pb/v3/termutil
 # github.com/cpuguy83/go-md2man/v2 v2.0.2
 ## explicit; go 1.11
 github.com/cpuguy83/go-md2man/v2/md2man
-# github.com/davecgh/go-spew v1.1.1
+# github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
 ## explicit
 github.com/davecgh/go-spew/spew
 # github.com/dennwc/varint v1.0.0
@@ -420,7 +420,7 @@ github.com/mattn/go-colorable
 # github.com/mattn/go-isatty v0.0.19
 ## explicit; go 1.15
 github.com/mattn/go-isatty
-# github.com/mattn/go-runewidth v0.0.14
+# github.com/mattn/go-runewidth v0.0.15
 ## explicit; go 1.9
 github.com/mattn/go-runewidth
 # github.com/matttproud/golang_protobuf_extensions v1.0.4
@@ -438,7 +438,7 @@ github.com/pkg/browser
 # github.com/pkg/errors v0.9.1
 ## explicit
 github.com/pkg/errors
-# github.com/pmezard/go-difflib v1.0.0
+# github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
 ## explicit
 github.com/pmezard/go-difflib/difflib
 # github.com/prometheus/client_golang v1.16.0
@@ -461,12 +461,12 @@ github.com/prometheus/common/version
 # github.com/prometheus/common/sigv4 v0.1.0
 ## explicit; go 1.15
 github.com/prometheus/common/sigv4
-# github.com/prometheus/procfs v0.11.0
+# github.com/prometheus/procfs v0.11.1
 ## explicit; go 1.19
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
-# github.com/prometheus/prometheus v0.45.0
+# github.com/prometheus/prometheus v0.46.0
 ## explicit; go 1.19
 github.com/prometheus/prometheus/config
 github.com/prometheus/prometheus/discovery
@@ -631,6 +631,7 @@ golang.org/x/oauth2/jwt
 # golang.org/x/sync v0.3.0
 ## explicit; go 1.17
 golang.org/x/sync/errgroup
+golang.org/x/sync/semaphore
 # golang.org/x/sys v0.10.0
 ## explicit; go 1.17
 golang.org/x/sys/cpu
@@ -650,7 +651,7 @@ golang.org/x/time/rate
 ## explicit; go 1.17
 golang.org/x/xerrors
 golang.org/x/xerrors/internal
-# google.golang.org/api v0.132.0
+# google.golang.org/api v0.134.0
 ## explicit; go 1.19
 google.golang.org/api/googleapi
 google.golang.org/api/googleapi/transport
@@ -682,21 +683,21 @@ google.golang.org/appengine/internal/socket
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/socket
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753
+# google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/type/date
 google.golang.org/genproto/googleapis/type/expr
 google.golang.org/genproto/internal
-# google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753
+# google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/api
 google.golang.org/genproto/googleapis/api/annotations
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/rpc/code
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.56.2
+# google.golang.org/grpc v1.57.0
 ## explicit; go 1.17
 google.golang.org/grpc
 google.golang.org/grpc/attributes