mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2024-11-21 14:44:00 +00:00)

vendor: make vendor-update

This commit is contained in:
parent f0eb1f3749
commit ac92d471a6

46 changed files with 3604 additions and 1431 deletions
go.mod (12 changed lines)

@@ -3,7 +3,7 @@ module github.com/VictoriaMetrics/VictoriaMetrics
 go 1.17
 
 require (
-	cloud.google.com/go/storage v1.18.2
+	cloud.google.com/go/storage v1.19.0
 	github.com/VictoriaMetrics/fastcache v1.8.0
 
 	// Do not use the original github.com/valyala/fasthttp because of issues
@@ -11,12 +11,12 @@ require (
 	github.com/VictoriaMetrics/fasthttp v1.1.0
 	github.com/VictoriaMetrics/metrics v1.18.1
 	github.com/VictoriaMetrics/metricsql v0.37.0
-	github.com/aws/aws-sdk-go v1.42.39
+	github.com/aws/aws-sdk-go v1.42.42
 	github.com/cespare/xxhash/v2 v2.1.2
 	github.com/cheggaaa/pb/v3 v3.0.8
 	github.com/golang/snappy v0.0.4
 	github.com/influxdata/influxdb v1.9.5
-	github.com/klauspost/compress v1.14.1
+	github.com/klauspost/compress v1.14.2
 	github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
 	github.com/urfave/cli/v2 v2.3.0
 	github.com/valyala/fastjson v1.6.3
@@ -24,7 +24,7 @@ require (
 	github.com/valyala/fasttemplate v1.2.1
 	github.com/valyala/gozstd v1.15.1
 	github.com/valyala/quicktemplate v1.7.0
-	golang.org/x/net v0.0.0-20220121210141-e204ce36a2ba
+	golang.org/x/net v0.0.0-20220127074510-2fabfed7e28f
 	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
 	golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
 	google.golang.org/api v0.65.0
@@ -68,8 +68,8 @@ require (
 	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5 // indirect
-	google.golang.org/grpc v1.43.0 // indirect
+	google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350 // indirect
+	google.golang.org/grpc v1.44.0 // indirect
 	google.golang.org/protobuf v1.27.1 // indirect
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
 )
go.sum (26 changed lines)

@@ -55,8 +55,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.18.2 h1:5NQw6tOn3eMm0oE8vTkfjau18kjL79FlMjy/CHTpmoY=
-cloud.google.com/go/storage v1.18.2/go.mod h1:AiIj7BWXyhO5gGVmYJ+S8tbkCx3yb0IMjua8Aw4naVM=
+cloud.google.com/go/storage v1.19.0 h1:XOQSnPJD8hRtZJ3VdCyK0mBZsGGImrzPAMbSWcHSe6Q=
+cloud.google.com/go/storage v1.19.0/go.mod h1:6rgiTRjOqI/Zd9YKimub5TIB4d+p3LH33V3ZE1DMuUM=
 collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
@@ -161,8 +161,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve
 github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
 github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
-github.com/aws/aws-sdk-go v1.42.39 h1:6Lso73VoCI8Zmv3zAMv4BNg2gHAKNOlbLv1s/ew90SI=
-github.com/aws/aws-sdk-go v1.42.39/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc=
+github.com/aws/aws-sdk-go v1.42.42 h1:2K61yu5BApC9ExAwC5Vk6ljWzBGbiRGRQYLW7adhP5U=
+github.com/aws/aws-sdk-go v1.42.42/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
 github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o=
@@ -654,8 +654,8 @@ github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
 github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
 github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.14.1 h1:hLQYb23E8/fO+1u53d02A97a8UnsddcvYzq4ERRU4ds=
-github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw=
+github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
 github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -1168,8 +1168,8 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220121210141-e204ce36a2ba h1:6u6sik+bn/y7vILcYkK3iwTBWN7WtBvB0+SZswQnbf8=
-golang.org/x/net v0.0.0-20220121210141-e204ce36a2ba/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220127074510-2fabfed7e28f h1:o66Bv9+w/vuk7Krcig9jZqD01FP7BL8OliFqqw0xzPI=
+golang.org/x/net v0.0.0-20220127074510-2fabfed7e28f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1185,7 +1185,6 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg=
 golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1446,7 +1445,6 @@ google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6
 google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
 google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
 google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
-google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E=
 google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
 google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
 google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM=
@@ -1523,7 +1521,6 @@ google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEc
 google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
 google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
 google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211016002631-37fc39342514/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
@@ -1531,8 +1528,9 @@ google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ6
 google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5 h1:zzNejm+EgrbLfDZ6lu9Uud2IVvHySPl8vQzf04laR5Q=
 google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350 h1:YxHp5zqIcAShDEvRr5/0rVESVS+njYF68PSdazrNLJo=
+google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@@ -1566,8 +1564,8 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD
 google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
 google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
 google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM=
-google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg=
+google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
vendor/cloud.google.com/go/storage/CHANGES.md (32 changed lines, generated, vendored)

@@ -1,5 +1,37 @@
 # Changes
 
+## [1.19.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.2...storage/v1.19.0) (2022-01-25)
+
+
+### Features
+
+* **storage:** add fully configurable and idempotency-aware retry strategy ([#5384](https://www.github.com/googleapis/google-cloud-go/issues/5384), [#5185](https://www.github.com/googleapis/google-cloud-go/issues/5185), [#5170](https://www.github.com/googleapis/google-cloud-go/issues/5170), [#5223](https://www.github.com/googleapis/google-cloud-go/issues/5223), [#5221](https://www.github.com/googleapis/google-cloud-go/issues/5221), [#5193](https://www.github.com/googleapis/google-cloud-go/issues/5193), [#5159](https://www.github.com/googleapis/google-cloud-go/issues/5159), [#5165](https://www.github.com/googleapis/google-cloud-go/issues/5165), [#5166](https://www.github.com/googleapis/google-cloud-go/issues/5166), [#5210](https://www.github.com/googleapis/google-cloud-go/issues/5210), [#5172](https://www.github.com/googleapis/google-cloud-go/issues/5172), [#5314](https://www.github.com/googleapis/google-cloud-go/issues/5314))
+  * This release contains changes to fully align this library's retry strategy
+    with best practices as described in the
+    Cloud Storage [docs](https://cloud.google.com/storage/docs/retry-strategy).
+  * The library will now retry only idempotent operations by default. This means
+    that for certain operations, including object upload, compose, rewrite,
+    update, and delete, requests will not be retried by default unless
+    [idempotency conditions](https://cloud.google.com/storage/docs/retry-strategy#idempotency)
+    for the request have been met.
+  * The library now has methods to configure aspects of retry policy for
+    API calls, including which errors are retried, the timing of the
+    exponential backoff, and how idempotency is taken into account.
+  * If you wish to re-enable retries for a non-idempotent request, use the
+    [RetryAlways](https://pkg.go.dev/cloud.google.com/go/storage@main#RetryAlways)
+    policy.
+  * For full details on how to configure retries, see the
+    [package docs](https://pkg.go.dev/cloud.google.com/go/storage@main#hdr-Retrying_failed_requests)
+    and the
+    [Cloud Storage docs](https://cloud.google.com/storage/docs/retry-strategy)
+* **storage:** GenerateSignedPostPolicyV4 can use existing creds to authenticate ([#5105](https://www.github.com/googleapis/google-cloud-go/issues/5105)) ([46489f4](https://www.github.com/googleapis/google-cloud-go/commit/46489f4c8a634068a3e7cf2fd5e5ca11b555c0a8))
+* **storage:** post policy can be signed with a fn that takes raw bytes ([#5079](https://www.github.com/googleapis/google-cloud-go/issues/5079)) ([25d1278](https://www.github.com/googleapis/google-cloud-go/commit/25d1278cab539fbfdd8563ed6b297e30d3fe555c))
+* **storage:** add rpo (turbo replication) support ([#5003](https://www.github.com/googleapis/google-cloud-go/issues/5003)) ([3bd5995](https://www.github.com/googleapis/google-cloud-go/commit/3bd59958e0c06d2655b67fcb5410668db3c52af0))
+
+
+### Bug Fixes
+
+* **storage:** fix nil check in gRPC Reader ([#5376](https://www.github.com/googleapis/google-cloud-go/issues/5376)) ([5e7d722](https://www.github.com/googleapis/google-cloud-go/commit/5e7d722d18a62b28ba98169b3bdbb49401377264))
+
 ### [1.18.2](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.1...storage/v1.18.2) (2021-10-18)
 
 
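The changelog above notes that GenerateSignedPostPolicyV4 can now authenticate with the client's existing credentials; the new BucketHandle wrapper appears in the bucket.go diff further down. The sketch below is only an illustration of how a caller might use it: the bucket name, object name, and reliance on default application credentials are assumptions, not part of this commit.

	package main

	import (
		"context"
		"fmt"
		"log"
		"time"

		"cloud.google.com/go/storage"
	)

	func main() {
		ctx := context.Background()
		// NewClient picks up GOOGLE_APPLICATION_CREDENTIALS if set.
		client, err := storage.NewClient(ctx)
		if err != nil {
			log.Fatal(err)
		}
		defer client.Close()

		// Only Expires is required; GoogleAccessID and PrivateKey can be
		// detected from the client's credentials (new in storage v1.19.0).
		policy, err := client.Bucket("example-bucket").GenerateSignedPostPolicyV4(
			"uploads/object.txt",
			&storage.PostPolicyV4Options{Expires: time.Now().Add(15 * time.Minute)},
		)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(policy.URL, policy.Fields)
	}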
vendor/cloud.google.com/go/storage/README.md (2 changed lines, generated, vendored)

@@ -3,7 +3,7 @@
 - [About Cloud Storage](https://cloud.google.com/storage/)
 - [API documentation](https://cloud.google.com/storage/docs)
 - [Go client documentation](https://pkg.go.dev/cloud.google.com/go/storage)
-- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage)
+- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/main/storage)
 
 ### Example Usage
 
vendor/cloud.google.com/go/storage/acl.go (38 changed lines, generated, vendored)

@@ -73,6 +73,7 @@ type ACLHandle struct {
 	object      string
 	isDefault   bool
 	userProject string // for requester-pays buckets
+	retry       *retryConfig
 }
 
 // Delete permanently deletes the ACL entry for the given entity.
@@ -120,12 +121,12 @@ func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
 func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
 	var acls *raw.ObjectAccessControls
 	var err error
-	err = runWithRetry(ctx, func() error {
+	err = run(ctx, func() error {
 		req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
 		a.configureCall(ctx, req)
 		acls, err = req.Do()
 		return err
-	})
+	}, a.retry, true)
 	if err != nil {
 		return nil, err
 	}
@@ -135,18 +136,21 @@ func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
 func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
 	req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
 	a.configureCall(ctx, req)
-	return req.Do()
+
+	return run(ctx, func() error {
+		return req.Do()
+	}, a.retry, false)
 }
 
 func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
 	var acls *raw.BucketAccessControls
 	var err error
-	err = runWithRetry(ctx, func() error {
+	err = run(ctx, func() error {
 		req := a.c.raw.BucketAccessControls.List(a.bucket)
 		a.configureCall(ctx, req)
 		acls, err = req.Do()
 		return err
-	})
+	}, a.retry, true)
 	if err != nil {
 		return nil, err
 	}
@@ -161,25 +165,29 @@ func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRol
 	}
 	req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
 	a.configureCall(ctx, req)
-	_, err := req.Do()
-	return err
+	return run(ctx, func() error {
+		_, err := req.Do()
+		return err
+	}, a.retry, false)
 }
 
 func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
 	req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
 	a.configureCall(ctx, req)
-	return req.Do()
+	return run(ctx, func() error {
+		return req.Do()
+	}, a.retry, false)
 }
 
 func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
 	var acls *raw.ObjectAccessControls
 	var err error
-	err = runWithRetry(ctx, func() error {
+	err = run(ctx, func() error {
 		req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
 		a.configureCall(ctx, req)
 		acls, err = req.Do()
 		return err
-	})
+	}, a.retry, true)
 	if err != nil {
 		return nil, err
 	}
@@ -204,14 +212,18 @@ func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRol
 		req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
 	}
 	a.configureCall(ctx, req)
-	_, err := req.Do()
-	return err
+	return run(ctx, func() error {
+		_, err := req.Do()
+		return err
+	}, a.retry, false)
 }
 
 func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
 	req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
 	a.configureCall(ctx, req)
-	return req.Do()
+	return run(ctx, func() error {
+		return req.Do()
+	}, a.retry, false)
 }
 
 func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {
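Throughout acl.go the old runWithRetry(ctx, call) helper is replaced by run(ctx, call, retry, isIdempotent), which threads the handle's retry configuration and an idempotency flag into every request: ACL reads pass true, ACL mutations pass false. The vendored implementation of run is not included in this excerpt of the diff, so the sketch below only illustrates the shape such a helper could take under those assumptions; the retryConfig type here is a stand-in, not the library's actual type, and a real implementation would also classify errors as transient before retrying.

	package retrysketch

	import (
		"context"
		"time"
	)

	// retryConfig is an illustrative stand-in for the storage package's
	// internal retry settings (policy plus backoff parameters).
	type retryConfig struct {
		retryAlways bool          // retry even non-idempotent calls
		retryNever  bool          // never retry
		initial     time.Duration // initial backoff
		max         time.Duration // backoff cap
	}

	// run retries call with exponential backoff when the configured policy and
	// the isIdempotent flag allow it, mirroring how the new call sites such as
	// run(ctx, func() error { ... }, a.retry, true) read in this diff.
	func run(ctx context.Context, call func() error, retry *retryConfig, isIdempotent bool) error {
		if retry == nil {
			retry = &retryConfig{initial: time.Second, max: 30 * time.Second}
		}
		shouldRetry := isIdempotent || retry.retryAlways
		if retry.retryNever {
			shouldRetry = false
		}
		if !shouldRetry {
			return call()
		}
		delay := retry.initial
		for {
			err := call()
			if err == nil {
				return nil
			}
			// A real implementation also checks that err is transient
			// (e.g. HTTP 429/5xx) before retrying; omitted for brevity.
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(delay):
			}
			if delay *= 2; delay > retry.max {
				delay = retry.max
			}
		}
	}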
vendor/cloud.google.com/go/storage/bucket.go (189 changed lines, generated, vendored)

@@ -44,6 +44,7 @@ type BucketHandle struct {
 	defaultObjectACL ACLHandle
 	conds            *BucketConditions
 	userProject      string // project for Requester Pays buckets
+	retry            *retryConfig
 }
 
 // Bucket returns a BucketHandle, which provides operations on the named bucket.
@@ -54,18 +55,22 @@ type BucketHandle struct {
 // found at:
 // https://cloud.google.com/storage/docs/bucket-naming
 func (c *Client) Bucket(name string) *BucketHandle {
+	retry := c.retry.clone()
 	return &BucketHandle{
 		c:    c,
 		name: name,
 		acl: ACLHandle{
 			c:      c,
 			bucket: name,
+			retry:  retry,
 		},
 		defaultObjectACL: ACLHandle{
 			c:         c,
 			bucket:    name,
 			isDefault: true,
+			retry:     retry,
 		},
+		retry: retry,
 	}
 }
 
@@ -95,7 +100,7 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
 	if attrs != nil && attrs.PredefinedDefaultObjectACL != "" {
 		req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL)
 	}
-	return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
+	return run(ctx, func() error { _, err := req.Context(ctx).Do(); return err }, b.retry, true)
 }
 
 // Delete deletes the Bucket.
@@ -107,7 +112,8 @@ func (b *BucketHandle) Delete(ctx context.Context) (err error) {
 	if err != nil {
 		return err
 	}
-	return runWithRetry(ctx, func() error { return req.Context(ctx).Do() })
+
+	return run(ctx, func() error { return req.Context(ctx).Do() }, b.retry, true)
 }
 
 func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) {
@@ -144,6 +150,7 @@ func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
 // for valid object names can be found at:
 // https://cloud.google.com/storage/docs/naming-objects
 func (b *BucketHandle) Object(name string) *ObjectHandle {
+	retry := b.retry.clone()
 	return &ObjectHandle{
 		c:      b.c,
 		bucket: b.name,
@@ -153,9 +160,11 @@ func (b *BucketHandle) Object(name string) *ObjectHandle {
 			bucket:      b.name,
 			object:      name,
 			userProject: b.userProject,
+			retry:       retry,
 		},
 		gen:         -1,
 		userProject: b.userProject,
+		retry:       retry,
 	}
 }
 
@@ -169,10 +178,10 @@ func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error
 		return nil, err
 	}
 	var resp *raw.Bucket
-	err = runWithRetry(ctx, func() error {
+	err = run(ctx, func() error {
 		resp, err = req.Context(ctx).Do()
 		return err
-	})
+	}, b.retry, true)
 	var e *googleapi.Error
 	if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
 		return nil, ErrBucketNotExist
@@ -210,12 +219,20 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (
 	if uattrs.PredefinedDefaultObjectACL != "" {
 		req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL)
 	}
-	// TODO(jba): retry iff metagen is set?
-	rb, err := req.Context(ctx).Do()
-	if err != nil {
+
+	isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0
+
+	var rawBucket *raw.Bucket
+	call := func() error {
+		rb, err := req.Context(ctx).Do()
+		rawBucket = rb
+		return err
+	}
+
+	if err := run(ctx, call, b.retry, isIdempotent); err != nil {
 		return nil, err
 	}
-	return newBucket(rb)
+	return newBucket(rawBucket)
 }
 
 func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) {
@@ -282,8 +299,54 @@ func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string,
 	return SignedURL(b.name, object, newopts)
 }
 
-// TODO: Add a similar wrapper for GenerateSignedPostPolicyV4 allowing users to
-// omit PrivateKey/SignBytes
+// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts.
+// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads.
+//
+// This method only requires the Expires field in the specified PostPolicyV4Options
+// to be non-nil. If not provided, it attempts to fill the GoogleAccessID and PrivateKey
+// from the GOOGLE_APPLICATION_CREDENTIALS environment variable.
+// If you are authenticating with a custom HTTP client, Service Account based
+// auto-detection will be hindered.
+//
+// If no private key is found, it attempts to use the GoogleAccessID to sign the URL.
+// This requires the IAM Service Account Credentials API to be enabled
+// (https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview)
+// and iam.serviceAccounts.signBlob permissions on the GoogleAccessID service account.
+// If you do not want these fields set for you, you may pass them in through opts or use
+// GenerateSignedPostPolicyV4(bucket, name string, opts *PostPolicyV4Options) instead.
+func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) {
+	if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) {
+		return GenerateSignedPostPolicyV4(b.name, object, opts)
+	}
+	// Make a copy of opts so we don't modify the pointer parameter.
+	newopts := opts.clone()
+
+	if newopts.GoogleAccessID == "" {
+		id, err := b.detectDefaultGoogleAccessID()
+		if err != nil {
+			return nil, err
+		}
+		newopts.GoogleAccessID = id
+	}
+	if newopts.SignBytes == nil && newopts.SignRawBytes == nil && len(newopts.PrivateKey) == 0 {
+		if b.c.creds != nil && len(b.c.creds.JSON) > 0 {
+			var sa struct {
+				PrivateKey string `json:"private_key"`
+			}
+			err := json.Unmarshal(b.c.creds.JSON, &sa)
+			if err == nil && sa.PrivateKey != "" {
+				newopts.PrivateKey = []byte(sa.PrivateKey)
+			}
+		}
+
+		// Don't error out if we can't unmarshal the private key from the client,
+		// fallback to the default sign function for the service account.
+		if len(newopts.PrivateKey) == 0 {
+			newopts.SignRawBytes = b.defaultSignBytesFunc(newopts.GoogleAccessID)
+		}
+	}
+	return GenerateSignedPostPolicyV4(b.name, object, newopts)
+}
 
 func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) {
 	returnErr := errors.New("no credentials found on client and not on GCE (Google Compute Engine)")
@@ -461,6 +524,12 @@ type BucketAttrs struct {
 	// The project number of the project the bucket belongs to.
 	// This field is read-only.
 	ProjectNumber uint64
+
+	// RPO configures the Recovery Point Objective (RPO) policy of the bucket.
+	// Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket.
+	// See https://cloud.google.com/storage/docs/managing-turbo-replication for
+	// more information.
+	RPO RPO
 }
 
 // BucketPolicyOnly is an alias for UniformBucketLevelAccess.
@@ -728,6 +797,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
 		Etag:                     b.Etag,
 		LocationType:             b.LocationType,
 		ProjectNumber:            b.ProjectNumber,
+		RPO:                      toRPO(b),
 	}, nil
 }
 
@@ -780,6 +850,7 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
 		Logging:          b.Logging.toRawBucketLogging(),
 		Website:          b.Website.toRawBucketWebsite(),
 		IamConfiguration: bktIAM,
+		Rpo:              b.RPO.String(),
 	}
 }
 
@@ -889,6 +960,12 @@ type BucketAttrsToUpdate struct {
 	// See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch.
 	PredefinedDefaultObjectACL string
 
+	// RPO configures the Recovery Point Objective (RPO) policy of the bucket.
+	// Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket.
+	// See https://cloud.google.com/storage/docs/managing-turbo-replication for
+	// more information.
+	RPO RPO
+
 	setLabels    map[string]string
 	deleteLabels map[string]bool
 }
@@ -1001,7 +1078,10 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
 		rb.DefaultObjectAcl = nil
 		rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl")
 	}
+
 	rb.StorageClass = ua.StorageClass
+	rb.Rpo = ua.RPO.String()
+
 	if ua.setLabels != nil || ua.deleteLabels != nil {
 		rb.Labels = map[string]string{}
 		for k, v := range ua.setLabels {
@@ -1081,10 +1161,10 @@ func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error {
 		metageneration = b.conds.MetagenerationMatch
 	}
 	req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration)
-	return runWithRetry(ctx, func() error {
+	return run(ctx, func() error {
 		_, err := req.Context(ctx).Do()
 		return err
-	})
+	}, b.retry, true)
 }
 
 // applyBucketConds modifies the provided call using the conditions in conds.
@@ -1347,6 +1427,20 @@ func toPublicAccessPrevention(b *raw.BucketIamConfiguration) PublicAccessPrevent
 	}
 }
 
+func toRPO(b *raw.Bucket) RPO {
+	if b == nil {
+		return RPOUnknown
+	}
+	switch b.Rpo {
+	case rpoDefault:
+		return RPODefault
+	case rpoAsyncTurbo:
+		return RPOAsyncTurbo
+	default:
+		return RPOUnknown
+	}
+}
+
 // Objects returns an iterator over the objects in the bucket that match the
 // Query q. If q is nil, no filtering is done. Objects will be iterated over
 // lexicographically by name.
@@ -1367,6 +1461,33 @@ func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
 	return it
 }
 
+// Retryer returns a bucket handle that is configured with custom retry
+// behavior as specified by the options that are passed to it. All operations
+// on the new handle will use the customized retry configuration.
+// Retry options set on a object handle will take precedence over options set on
+// the bucket handle.
+// These retry options will merge with the client's retry configuration (if set)
+// for the returned handle. Options passed into this method will take precedence
+// over retry options on the client. Note that you must explicitly pass in each
+// option you want to override.
+func (b *BucketHandle) Retryer(opts ...RetryOption) *BucketHandle {
+	b2 := *b
+	var retry *retryConfig
+	if b.retry != nil {
+		// merge the options with the existing retry
+		retry = b.retry
+	} else {
+		retry = &retryConfig{}
+	}
+	for _, opt := range opts {
+		opt.apply(retry)
+	}
+	b2.retry = retry
+	b2.acl.retry = retry
+	b2.defaultObjectACL.retry = retry
+	return &b2
+}
+
 // An ObjectIterator is an iterator over ObjectAttrs.
 //
 // Note: This iterator is not safe for concurrent operations without explicit synchronization.
@@ -1434,10 +1555,10 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error)
 	}
 	var resp *raw.Objects
 	var err error
-	err = runWithRetry(it.ctx, func() error {
+	err = run(it.ctx, func() error {
 		resp, err = req.Context(it.ctx).Do()
 		return err
-	})
+	}, it.bucket.retry, true)
 	if err != nil {
 		var e *googleapi.Error
 		if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
@@ -1518,10 +1639,10 @@ func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, e
 		req.MaxResults(int64(pageSize))
 	}
 	var resp *raw.Buckets
-	err = runWithRetry(it.ctx, func() error {
+	err = run(it.ctx, func() error {
 		resp, err = req.Context(it.ctx).Do()
 		return err
-	})
+	}, it.client.retry, true)
 	if err != nil {
 		return "", err
 	}
@@ -1534,3 +1655,39 @@ func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, e
 	}
 	return resp.NextPageToken, nil
 }
+
+// RPO (Recovery Point Objective) configures the turbo replication feature. See
+// https://cloud.google.com/storage/docs/managing-turbo-replication for more information.
+type RPO int
+
+const (
+	// RPOUnknown is a zero value. It may be returned from bucket.Attrs() if RPO
+	// is not present in the bucket metadata, that is, the bucket is not dual-region.
+	// This value is also used if the RPO field is not set in a call to GCS.
+	RPOUnknown RPO = iota
+
+	// RPODefault represents default replication. It is used to reset RPO on an
+	// existing bucket that has this field set to RPOAsyncTurbo. Otherwise it
+	// is equivalent to RPOUnknown, and is always ignored. This value is valid
+	// for dual- or multi-region buckets.
+	RPODefault
+
+	// RPOAsyncTurbo represents turbo replication and is used to enable Turbo
+	// Replication on a bucket. This value is only valid for dual-region buckets.
+	RPOAsyncTurbo
+
+	rpoUnknown    string = ""
+	rpoDefault           = "DEFAULT"
+	rpoAsyncTurbo        = "ASYNC_TURBO"
+)
+
+func (rpo RPO) String() string {
+	switch rpo {
+	case RPODefault:
+		return rpoDefault
+	case RPOAsyncTurbo:
+		return rpoAsyncTurbo
+	default:
+		return rpoUnknown
+	}
+}
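bucket.go gains RPO (turbo replication) support in this update: BucketAttrs and BucketAttrsToUpdate expose an RPO field that maps to the raw bucket's Rpo value via toRPO and RPO.String. The sketch below only illustrates how a caller might use it; the project ID, bucket name, and dual-region location are placeholders, and RPOAsyncTurbo is valid for dual-region buckets only.

	package main

	import (
		"context"
		"fmt"
		"log"

		"cloud.google.com/go/storage"
	)

	func main() {
		ctx := context.Background()
		client, err := storage.NewClient(ctx)
		if err != nil {
			log.Fatal(err)
		}
		defer client.Close()

		b := client.Bucket("example-dual-region-bucket")

		// Create a dual-region bucket with turbo replication enabled.
		if err := b.Create(ctx, "example-project", &storage.BucketAttrs{
			Location: "NAM4", // placeholder dual-region location
			RPO:      storage.RPOAsyncTurbo,
		}); err != nil {
			log.Fatal(err)
		}

		// Later, switch the bucket back to default replication.
		attrs, err := b.Update(ctx, storage.BucketAttrsToUpdate{RPO: storage.RPODefault})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("replication mode:", attrs.RPO)
	}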
vendor/cloud.google.com/go/storage/copy.go (14 changed lines, generated, vendored)

@@ -138,8 +138,11 @@ func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.Rewr
 	var res *raw.RewriteResponse
 	var err error
 	setClientHeader(call.Header())
-	err = runWithRetry(ctx, func() error { res, err = call.Do(); return err })
-	if err != nil {
+
+	retryCall := func() error { res, err = call.Do(); return err }
+	isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
+
+	if err := run(ctx, retryCall, c.dst.retry, isIdempotent); err != nil {
 		return nil, err
 	}
 	c.RewriteToken = res.RewriteToken
@@ -230,8 +233,11 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
 	}
 	var obj *raw.Object
 	setClientHeader(call.Header())
-	err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
-	if err != nil {
+
+	retryCall := func() error { obj, err = call.Do(); return err }
+	isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
+
+	if err := run(ctx, retryCall, c.dst.retry, isIdempotent); err != nil {
 		return nil, err
 	}
 	return newObject(obj), nil
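With the change above, rewrite and compose calls are only retried when the destination carries preconditions that make them conditionally idempotent (GenerationMatch or DoesNotExist). A minimal sketch of opting into that behavior follows; the bucket and object names are placeholders.

	package main

	import (
		"context"
		"log"

		"cloud.google.com/go/storage"
	)

	func main() {
		ctx := context.Background()
		client, err := storage.NewClient(ctx)
		if err != nil {
			log.Fatal(err)
		}
		defer client.Close()

		src := client.Bucket("example-src").Object("source-object")

		// The DoesNotExist precondition makes the copy conditionally idempotent,
		// so transient failures of the rewrite call can safely be retried.
		dst := client.Bucket("example-dst").Object("copied-object").
			If(storage.Conditions{DoesNotExist: true})

		if _, err := dst.CopierFrom(src).Run(ctx); err != nil {
			log.Fatal(err)
		}
	}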
vendor/cloud.google.com/go/storage/doc.go (52 changed lines, generated, vendored)

@@ -19,15 +19,9 @@ Google Cloud Storage stores data in named objects, which are grouped into bucket
 More information about Google Cloud Storage is available at
 https://cloud.google.com/storage/docs.
 
-See https://godoc.org/cloud.google.com/go for authentication, timeouts,
+See https://pkg.go.dev/cloud.google.com/go for authentication, timeouts,
 connection pooling and similar aspects of this package.
 
-All of the methods of this package use exponential backoff to retry calls that fail
-with certain errors, as described in
-https://cloud.google.com/storage/docs/exponential-backoff. Retrying continues
-indefinitely unless the controlling context is canceled or the client is closed. See
-context.WithTimeout and context.WithCancel.
-
 
 Creating a Client
 
@@ -246,12 +240,52 @@ as the documentation of GenerateSignedPostPolicyV4.
 
 Errors
 
-Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error).
-These errors can be introspected for more information by using `errors.As` with the richer `googleapi.Error` type. For example:
+Errors returned by this client are often of the type googleapi.Error.
+These errors can be introspected for more information by using errors.As
+with the richer googleapi.Error type. For example:
 
 	var e *googleapi.Error
 	if ok := errors.As(err, &e); ok {
 		  if e.Code == 409 { ... }
 	}
+
+See https://pkg.go.dev/google.golang.org/api/googleapi#Error for more information.
+
+
+Retrying failed requests
+
+Methods in this package may retry calls that fail with transient errors.
+Retrying continues indefinitely unless the controlling context is canceled, the
+client is closed, or a non-transient error is received. To stop retries from
+continuing, use context timeouts or cancellation.
+
+The retry strategy in this library follows best practices for Cloud Storage. By
+default, operations are retried only if they are idempotent, and exponential
+backoff with jitter is employed. In addition, errors are only retried if they
+are defined as transient by the service. See
+https://cloud.google.com/storage/docs/retry-strategy for more information.
+
+Users can configure non-default retry behavior for a single library call (using
+BucketHandle.Retryer and ObjectHandle.Retryer) or for all calls made by a
+client (using Client.SetRetry). For example:
+
+	o := client.Bucket(bucket).Object(object).Retryer(
+		// Use WithBackoff to change the timing of the exponential backoff.
+		storage.WithBackoff(gax.Backoff{
+			Initial: 2 * time.Second,
+		}),
+		// Use WithPolicy to configure the idempotency policy. RetryAlways will
+		// retry the operation even if it is non-idempotent.
+		storage.WithPolicy(storage.RetryAlways),
+	)
+
+	// Use a context timeout to set an overall deadline on the call, including all
+	// potential retries.
+	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
+	defer cancel()
+
+	// Delete an object using the specified strategy and timeout.
+	if err := o.Delete(ctx); err != nil {
+		// Handle err.
+	}
 */
 package storage // import "cloud.google.com/go/storage"
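The new package documentation above shows per-handle configuration through Retryer and also mentions Client.SetRetry for configuring every call made through a client. A hedged sketch of that client-wide variant, assuming the gax-go backoff type referenced in the example above and a placeholder bucket name:

	package main

	import (
		"context"
		"log"
		"time"

		"cloud.google.com/go/storage"
		"github.com/googleapis/gax-go/v2"
	)

	func main() {
		ctx := context.Background()
		client, err := storage.NewClient(ctx)
		if err != nil {
			log.Fatal(err)
		}
		defer client.Close()

		// Apply one retry policy to every call made through this client.
		// Per-handle Retryer options still take precedence over these settings.
		client.SetRetry(
			storage.WithBackoff(gax.Backoff{
				Initial:    500 * time.Millisecond,
				Max:        10 * time.Second,
				Multiplier: 2,
			}),
			storage.WithPolicy(storage.RetryIdempotent),
		)

		// Attrs is a read and therefore idempotent; transient errors are
		// retried with the backoff configured above.
		if _, err := client.Bucket("example-bucket").Attrs(ctx); err != nil {
			log.Fatal(err)
		}
	}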
71
vendor/cloud.google.com/go/storage/emulator_test.sh
generated
vendored
Normal file
71
vendor/cloud.google.com/go/storage/emulator_test.sh
generated
vendored
Normal file
|
@ -0,0 +1,71 @@
#!/bin/bash
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License..

# Fail on any error
set -eo pipefail

# Display commands being run
set -x

# Only run on Go 1.17+
min_minor_ver=17

v=`go version | { read _ _ v _; echo ${v#go}; }`
comps=(${v//./ })
minor_ver=${comps[1]}

if [ "$minor_ver" -lt "$min_minor_ver" ]; then
    echo minor version $minor_ver, skipping
    exit 0
fi

export STORAGE_EMULATOR_HOST="http://localhost:9000"

DEFAULT_IMAGE_NAME='gcr.io/cloud-devrel-public-resources/storage-testbench'
DEFAULT_IMAGE_TAG='latest'
DOCKER_IMAGE=${DEFAULT_IMAGE_NAME}:${DEFAULT_IMAGE_TAG}
CONTAINER_NAME=storage_testbench

# Get the docker image for the testbench
docker pull $DOCKER_IMAGE

# Start the testbench
# Note: --net=host makes the container bind directly to the Docker host's network,
# with no network isolation. If we were to use port-mapping instead, reset connection errors
# would be captured differently and cause unexpected test behaviour.
# The host networking driver works only on Linux hosts.
# See more about using host networking: https://docs.docker.com/network/host/
docker run --name $CONTAINER_NAME --rm --net=host $DOCKER_IMAGE &
echo "Running the Cloud Storage testbench: $STORAGE_EMULATOR_HOST"

# Check that the server is running - retry several times to allow for start-up time
response=$(curl -w "%{http_code}\n" $STORAGE_EMULATOR_HOST --retry-connrefused --retry 5 -o /dev/null)

if [[ $response != 200 ]]
then
    echo "Testbench server did not start correctly"
    exit 1
fi

# Stop the testbench & cleanup environment variables
function cleanup() {
    echo "Cleanup testbench"
    docker stop $CONTAINER_NAME
    unset STORAGE_EMULATOR_HOST;
}
trap cleanup EXIT

# Run tests
go test -v -timeout 10m ./ -run="TestRetryConformance" -short 2>&1 | tee -a sponge_log.log
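For reference, a minimal sketch of how a test might connect to the testbench started by this script. The helper name newEmulatorClient is hypothetical and not part of the package; it assumes the script above has exported STORAGE_EMULATOR_HOST=http://localhost:9000 and started the container:

	// Hypothetical test helper: storage.NewClient honors STORAGE_EMULATOR_HOST
	// and does not require real credentials when it is set.
	func newEmulatorClient(ctx context.Context, t *testing.T) *storage.Client {
		if os.Getenv("STORAGE_EMULATOR_HOST") == "" {
			t.Skip("no emulator configured")
		}
		client, err := storage.NewClient(ctx)
		if err != nil {
			t.Fatalf("storage.NewClient: %v", err)
		}
		return client
	}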
33	vendor/cloud.google.com/go/storage/hmac.go	generated vendored
@@ -89,8 +89,8 @@ type HMACKey struct {
type HMACKeyHandle struct {
	projectID string
	accessID  string
	retry     *retryConfig
	raw       *raw.ProjectsHmacKeysService
}

// HMACKeyHandle creates a handle that will be used for HMACKey operations.
@@ -100,6 +100,7 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
	return &HMACKeyHandle{
		projectID: projectID,
		accessID:  accessID,
		retry:     c.retry,
		raw:       raw.NewProjectsHmacKeysService(c.raw),
	}
}
@@ -126,10 +127,10 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC

	var metadata *raw.HmacKeyMetadata
	var err error
	err = runWithRetry(ctx, func() error {
	err = run(ctx, func() error {
		metadata, err = call.Context(ctx).Do()
		return err
	})
	}, hkh.retry, true)
	if err != nil {
		return nil, err
	}
@@ -156,9 +157,9 @@ func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) err
	}
	setClientHeader(delCall.Header())

	return runWithRetry(ctx, func() error {
	return run(ctx, func() error {
		return delCall.Context(ctx).Do()
	})
	}, hkh.retry, true)
}

func pbHmacKeyToHMACKey(pb *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) {
@@ -214,8 +215,13 @@ func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEma

	setClientHeader(call.Header())

	hkPb, err := call.Context(ctx).Do()
	var hkPb *raw.HmacKey
	if err != nil {
	if err := run(ctx, func() error {
		h, err := call.Context(ctx).Do()
		hkPb = h
		return err
	}, c.retry, false); err != nil {
		return nil, err
	}

@@ -257,10 +263,11 @@ func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opt

	var metadata *raw.HmacKeyMetadata
	var err error
	err = runWithRetry(ctx, func() error {
	isIdempotent := len(au.Etag) > 0
	err = run(ctx, func() error {
		metadata, err = call.Context(ctx).Do()
		return err
	})
	}, h.retry, isIdempotent)

	if err != nil {
		return nil, err
@@ -285,6 +292,7 @@ type HMACKeysIterator struct {
	nextFunc func() error
	index    int
	desc     hmacKeyDesc
	retry    *retryConfig
}

// ListHMACKeys returns an iterator for listing HMACKeys.
@@ -297,6 +305,7 @@ func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMA
		ctx:       ctx,
		raw:       raw.NewProjectsHmacKeysService(c.raw),
		projectID: projectID,
		retry:     c.retry,
	}

	for _, opt := range opts {
@@ -361,10 +370,10 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string,

	ctx := it.ctx
	var resp *raw.HmacKeysMetadata
	err = runWithRetry(it.ctx, func() error {
	err = run(it.ctx, func() error {
		resp, err = call.Context(ctx).Do()
		return err
	})
	}, it.retry, true)
	if err != nil {
		return "", err
	}
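The Update path above only treats the call as idempotent when an Etag is supplied. A minimal caller-side sketch, assuming a handle hkh from Client.HMACKeyHandle and a previously fetched key named key (both placeholders):

	// Passing the Etag from an earlier read makes the update conditionally
	// idempotent, so it is eligible for retries under RetryIdempotent.
	updated, err := hkh.Update(ctx, storage.HMACKeyAttrsToUpdate{
		State: storage.Inactive,
		Etag:  key.Etag,
	})
	if err != nil {
		// Handle err.
	}
	_ = updated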
15	vendor/cloud.google.com/go/storage/iam.go	generated vendored
@@ -29,6 +29,7 @@ func (b *BucketHandle) IAM() *iam.Handle {
	return iam.InternalNewHandleClient(&iamClient{
		raw:         b.c.raw,
		userProject: b.userProject,
		retry:       b.retry,
	}, b.name)
}

@@ -36,6 +37,7 @@ func (b *BucketHandle) IAM() *iam.Handle {
type iamClient struct {
	raw         *raw.Service
	userProject string
	retry       *retryConfig
}

func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) {
@@ -52,10 +54,10 @@ func (c *iamClient) GetWithVersion(ctx context.Context, resource string, request
		call.UserProject(c.userProject)
	}
	var rp *raw.Policy
	err = runWithRetry(ctx, func() error {
	err = run(ctx, func() error {
		rp, err = call.Context(ctx).Do()
		return err
	})
	}, c.retry, true)
	if err != nil {
		return nil, err
	}
@@ -72,10 +74,11 @@ func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (
	if c.userProject != "" {
		call.UserProject(c.userProject)
	}
	return runWithRetry(ctx, func() error {
	isIdempotent := len(p.Etag) > 0
	return run(ctx, func() error {
		_, err := call.Context(ctx).Do()
		return err
	})
	}, c.retry, isIdempotent)
}

func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) {
@@ -88,10 +91,10 @@ func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (
		call.UserProject(c.userProject)
	}
	var res *raw.TestIamPermissionsResponse
	err = runWithRetry(ctx, func() error {
	err = run(ctx, func() error {
		res, err = call.Context(ctx).Do()
		return err
	})
	}, c.retry, true)
	if err != nil {
		return nil, err
	}
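As with HMAC key updates, Set is only retried when the submitted policy carries an Etag. A minimal sketch of the usual read-modify-write flow, assuming bucket is a *storage.BucketHandle and the member string is a placeholder:

	// The Etag returned by Policy is preserved in the policy object, which
	// lets the subsequent SetPolicy be treated as idempotent and retryable.
	policy, err := bucket.IAM().Policy(ctx)
	if err != nil {
		// Handle err.
	}
	policy.Add("group:admins@example.com", "roles/storage.objectViewer")
	if err := bucket.IAM().SetPolicy(ctx, policy); err != nil {
		// Handle err.
	}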
20	vendor/cloud.google.com/go/storage/internal/apiv2/doc.go	generated vendored
@@ -1,4 +1,4 @@
// Copyright 2021 Google LLC
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -39,6 +39,22 @@
//
// The following is an example of making an API call with the newly created client.
//
//  ctx := context.Background()
//  c, err := storage.NewClient(ctx)
//  if err != nil {
//          // TODO: Handle error.
//  }
//  defer c.Close()
//
//  req := &storagepb.DeleteBucketRequest{
//          // TODO: Fill request struct fields.
//          // See https://pkg.go.dev/google.golang.org/genproto/googleapis/storage/v2#DeleteBucketRequest.
//  }
//  err = c.DeleteBucket(ctx, req)
//  if err != nil {
//          // TODO: Handle error.
//  }
//
// Use of Context
//
// The ctx passed to NewClient is used for authentication requests and
@@ -68,7 +84,7 @@ import (
type clientHookParams struct{}
type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)

const versionClient = "20211015"
const versionClient = "20220114"

func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
125	vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json	generated vendored
@@ -10,6 +10,101 @@
      "grpc": {
        "libraryClient": "Client",
        "rpcs": {
          "ComposeObject": { "methods": [ "ComposeObject" ] },
          "CreateBucket": { "methods": [ "CreateBucket" ] },
          "CreateHmacKey": { "methods": [ "CreateHmacKey" ] },
          "CreateNotification": { "methods": [ "CreateNotification" ] },
          "DeleteBucket": { "methods": [ "DeleteBucket" ] },
          "DeleteHmacKey": { "methods": [ "DeleteHmacKey" ] },
          "DeleteNotification": { "methods": [ "DeleteNotification" ] },
          "DeleteObject": { "methods": [ "DeleteObject" ] },
          "GetBucket": { "methods": [ "GetBucket" ] },
          "GetHmacKey": { "methods": [ "GetHmacKey" ] },
          "GetIamPolicy": { "methods": [ "GetIamPolicy" ] },
          "GetNotification": { "methods": [ "GetNotification" ] },
          "GetObject": { "methods": [ "GetObject" ] },
          "GetServiceAccount": { "methods": [ "GetServiceAccount" ] },
          "ListBuckets": { "methods": [ "ListBuckets" ] },
          "ListHmacKeys": { "methods": [ "ListHmacKeys" ] },
          "ListNotifications": { "methods": [ "ListNotifications" ] },
          "ListObjects": { "methods": [ "ListObjects" ] },
          "LockBucketRetentionPolicy": { "methods": [ "LockBucketRetentionPolicy" ] },
          "QueryWriteStatus": { "methods": [ "QueryWriteStatus" ] },
@@ -20,11 +115,41 @@
          "ReadObject": { "methods": [ "ReadObject" ] },
          "RewriteObject": { "methods": [ "RewriteObject" ] },
          "SetIamPolicy": { "methods": [ "SetIamPolicy" ] },
          "StartResumableWrite": { "methods": [ "StartResumableWrite" ] },
          "TestIamPermissions": { "methods": [ "TestIamPermissions" ] },
          "UpdateBucket": { "methods": [ "UpdateBucket" ] },
          "UpdateHmacKey": { "methods": [ "UpdateHmacKey" ] },
          "UpdateObject": { "methods": [ "UpdateObject" ] },
          "WriteObject": { "methods": [ "WriteObject" ] },
1308	vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go	generated vendored
File diff suppressed because it is too large
54	vendor/cloud.google.com/go/storage/invoke.go	generated vendored
@@ -17,42 +17,66 @@ package storage
import (
	"context"
	"io"
	"net"
	"net/url"
	"strings"

	"cloud.google.com/go/internal"
	gax "github.com/googleapis/gax-go/v2"
	"golang.org/x/xerrors"
	"google.golang.org/api/googleapi"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
func runWithRetry(ctx context.Context, call func() error) error {
	return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
var defaultRetry *retryConfig = &retryConfig{}

// run determines whether a retry is necessary based on the config and
// idempotency information. It then calls the function with or without retries
// as appropriate, using the configured settings.
func run(ctx context.Context, call func() error, retry *retryConfig, isIdempotent bool) error {
	if retry == nil {
		retry = defaultRetry
	}
	if (retry.policy == RetryIdempotent && !isIdempotent) || retry.policy == RetryNever {
		return call()
	}
	bo := gax.Backoff{}
	if retry.backoff != nil {
		bo.Multiplier = retry.backoff.Multiplier
		bo.Initial = retry.backoff.Initial
		bo.Max = retry.backoff.Max
	}
	var errorFunc func(err error) bool = shouldRetry
	if retry.shouldRetry != nil {
		errorFunc = retry.shouldRetry
	}
	return internal.Retry(ctx, bo, func() (stop bool, err error) {
		err = call()
		if err == nil {
			return true, nil
		}
		if shouldRetry(err) {
			return false, err
		}
		return true, err
		return !errorFunc(err), err
	})
}

func shouldRetry(err error) bool {
	if err == io.ErrUnexpectedEOF {
	if err == nil {
		return false
	}
	if xerrors.Is(err, io.ErrUnexpectedEOF) {
		return true
	}

	switch e := err.(type) {
	case *net.OpError:
		if strings.Contains(e.Error(), "use of closed network connection") {
			// TODO: check against net.ErrClosed (go 1.16+) instead of string
			return true
		}
	case *googleapi.Error:
		// Retry on 429 and 5xx, according to
		// Retry on 408, 429, and 5xx, according to
		// https://cloud.google.com/storage/docs/exponential-backoff.
		return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
		return e.Code == 408 || e.Code == 429 || (e.Code >= 500 && e.Code < 600)
	case *url.Error:
		// Retry socket-level errors ECONNREFUSED and ENETUNREACH (from syscall).
		// Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall).
		// Unfortunately the error type is unexported, so we resort to string
		// matching.
		retriable := []string{"connection refused", "connection reset"}
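The dispatch in run boils down to one check: retry only when the configured policy allows it for this particular call. A standalone sketch of that decision, using a hypothetical local helper name (willRetry is not part of the package API):

	// Sketch of the retry decision made inside run.
	func willRetry(policy storage.RetryPolicy, isIdempotent bool) bool {
		switch policy {
		case storage.RetryNever:
			return false
		case storage.RetryIdempotent:
			// The default: only operations known (or made) idempotent are retried.
			return isIdempotent
		case storage.RetryAlways:
			return true
		}
		return false
	}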
15	vendor/cloud.google.com/go/storage/notifications.go	generated vendored
@@ -137,7 +137,12 @@ func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (re
	if b.userProject != "" {
		call.UserProject(b.userProject)
	}
	rn, err := call.Context(ctx).Do()

	var rn *raw.Notification
	err = run(ctx, func() error {
		rn, err = call.Context(ctx).Do()
		return err
	}, b.retry, false)
	if err != nil {
		return nil, err
	}
@@ -156,10 +161,10 @@ func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notific
		call.UserProject(b.userProject)
	}
	var res *raw.Notifications
	err = runWithRetry(ctx, func() error {
	err = run(ctx, func() error {
		res, err = call.Context(ctx).Do()
		return err
	})
	}, b.retry, true)
	if err != nil {
		return nil, err
	}
@@ -184,7 +189,7 @@ func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err e
	if b.userProject != "" {
		call.UserProject(b.userProject)
	}
	return runWithRetry(ctx, func() error {
	return run(ctx, func() error {
		return call.Context(ctx).Do()
	})
	}, b.retry, true)
}
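AddNotification is wired up as non-idempotent (run is called with false), so by default it is not retried; a caller has to opt into RetryAlways on the handle. A small sketch, assuming bucket is a *storage.BucketHandle and "my-topic" / "my-project" are placeholders:

	// Opting a handle into RetryAlways makes the non-idempotent call retryable.
	b := bucket.Retryer(storage.WithPolicy(storage.RetryAlways))
	n, err := b.AddNotification(ctx, &storage.Notification{
		TopicID:        "my-topic",
		TopicProjectID: "my-project",
		PayloadFormat:  storage.JSONPayload,
	})
	if err != nil {
		// Handle err.
	}
	_ = n.ID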
99	vendor/cloud.google.com/go/storage/post_policy_v4.go	generated vendored
@@ -52,22 +52,38 @@ type PostPolicyV4Options struct {
	// Exactly one of PrivateKey or SignBytes must be non-nil.
	PrivateKey []byte

	// SignBytes is a function for implementing custom signing. For example, if
	// SignBytes is a function for implementing custom signing.
	//
	// Deprecated: Use SignRawBytes. If both SignBytes and SignRawBytes are defined,
	// SignBytes will be ignored.
	// This SignBytes function expects the bytes it receives to be hashed, while
	// SignRawBytes accepts the raw bytes without hashing, allowing more flexibility.
	// Add the following to the top of your signing function to hash the bytes
	// to use SignRawBytes instead:
	//        shaSum := sha256.Sum256(bytes)
	//        bytes = shaSum[:]
	//
	SignBytes func(hashBytes []byte) (signature []byte, err error)

	// SignRawBytes is a function for implementing custom signing. For example, if
	// your application is running on Google App Engine, you can use
	// appengine's internal signing function:
	//        ctx := appengine.NewContext(request)
	//        acc, _ := appengine.ServiceAccount(ctx)
	//        url, err := SignedURL("bucket", "object", &SignedURLOptions{
	//        &PostPolicyV4Options{
	//                GoogleAccessID: acc,
	//                SignBytes: func(b []byte) ([]byte, error) {
	//                SignRawBytes: func(b []byte) ([]byte, error) {
	//                        _, signedBytes, err := appengine.SignBytes(ctx, b)
	//                        return signedBytes, err
	//                },
	//                // etc.
	//        })
	//
	// Exactly one of PrivateKey or SignBytes must be non-nil.
	// SignRawBytes is equivalent to the SignBytes field on SignedURLOptions;
	SignBytes func(hashBytes []byte) (signature []byte, err error)
	// that is, you may use the same signing function for the two.
	//
	// Exactly one of PrivateKey or SignRawBytes must be non-nil.
	SignRawBytes func(bytes []byte) (signature []byte, err error)

	// Expires is the expiration time on the signed URL.
	// It must be a time in the future.
@@ -96,6 +112,23 @@ type PostPolicyV4Options struct {
	// a 4XX status code, back with the message describing the problem.
	// Optional.
	Conditions []PostPolicyV4Condition

	shouldHashSignBytes bool
}

func (opts *PostPolicyV4Options) clone() *PostPolicyV4Options {
	return &PostPolicyV4Options{
		GoogleAccessID:      opts.GoogleAccessID,
		PrivateKey:          opts.PrivateKey,
		SignBytes:           opts.SignBytes,
		SignRawBytes:        opts.SignRawBytes,
		Expires:             opts.Expires,
		Style:               opts.Style,
		Insecure:            opts.Insecure,
		Fields:              opts.Fields,
		Conditions:          opts.Conditions,
		shouldHashSignBytes: opts.shouldHashSignBytes,
	}
}

// PolicyV4Fields describes the attributes for a PostPolicyV4 request.
@@ -220,20 +253,22 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options

	var signingFn func(hashedBytes []byte) ([]byte, error)
	switch {
	case opts.SignBytes != nil:
	case opts.SignRawBytes != nil:
		signingFn = opts.SignRawBytes
	case opts.shouldHashSignBytes:
		signingFn = opts.SignBytes

	case len(opts.PrivateKey) != 0:
		parsedRSAPrivKey, err := parseKey(opts.PrivateKey)
		if err != nil {
			return nil, err
		}
		signingFn = func(hashedBytes []byte) ([]byte, error) {
		signingFn = func(b []byte) ([]byte, error) {
			return rsa.SignPKCS1v15(rand.Reader, parsedRSAPrivKey, crypto.SHA256, hashedBytes)
			sum := sha256.Sum256(b)
			return rsa.SignPKCS1v15(rand.Reader, parsedRSAPrivKey, crypto.SHA256, sum[:])
		}

	default:
		return nil, errors.New("storage: exactly one of PrivateKey or SignedBytes must be set")
		return nil, errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set")
	}

	var descFields PolicyV4Fields
@@ -307,10 +342,18 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options
	}

	b64Policy := base64.StdEncoding.EncodeToString(condsAsJSON)
	shaSum := sha256.Sum256([]byte(b64Policy))
	var signature []byte
	signature, err := signingFn(shaSum[:])
	var signErr error
	if err != nil {
		return nil, err
	if opts.shouldHashSignBytes {
		// SignBytes expects hashed bytes as input instead of raw bytes, so we hash them
		shaSum := sha256.Sum256([]byte(b64Policy))
		signature, signErr = signingFn(shaSum[:])
	} else {
		signature, signErr = signingFn([]byte(b64Policy))
	}
	if signErr != nil {
		return nil, signErr
	}

	policyFields["policy"] = b64Policy
@@ -348,15 +391,16 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options

// validatePostPolicyV4Options checks that:
// * GoogleAccessID is set
// * either but not both PrivateKey and SignBytes are set or nil, but not both
// * either PrivateKey or SignRawBytes/SignBytes is set, but not both
// * Expires, the deadline is not in the past
// * the deadline set in Expires is not in the past
// * if Style is not set, it'll use PathStyle
// * sets shouldHashSignBytes to true if opts.SignBytes should be used
func validatePostPolicyV4Options(opts *PostPolicyV4Options, now time.Time) error {
	if opts == nil || opts.GoogleAccessID == "" {
		return errors.New("storage: missing required GoogleAccessID")
	}
	if privBlank, signBlank := len(opts.PrivateKey) == 0, opts.SignBytes == nil; privBlank == signBlank {
	if privBlank, signBlank := len(opts.PrivateKey) == 0, opts.SignBytes == nil && opts.SignRawBytes == nil; privBlank == signBlank {
		return errors.New("storage: exactly one of PrivateKey or SignedBytes must be set")
		return errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set")
	}
	if opts.Expires.Before(now) {
		return errors.New("storage: expecting Expires to be in the future")
@@ -364,6 +408,9 @@ func validatePostPolicyV4Options(opts *PostPolicyV4Options, now time.Time) error
	if opts.Style == nil {
		opts.Style = PathStyle()
	}
	if opts.SignRawBytes == nil && opts.SignBytes != nil {
		opts.shouldHashSignBytes = true
	}
	return nil
}
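A minimal caller-side sketch of the new SignRawBytes option, assuming a signer value that can sign raw (unhashed) bytes with the service account's key; signer, the bucket, the object name, and the account email are all placeholders:

	// SignRawBytes receives the raw policy bytes; hashing is the signer's job.
	policy, err := storage.GenerateSignedPostPolicyV4("my-bucket", "my-object.txt", &storage.PostPolicyV4Options{
		GoogleAccessID: "sa@project.iam.gserviceaccount.com",
		SignRawBytes: func(b []byte) ([]byte, error) {
			return signer.Sign(ctx, b)
		},
		Expires: time.Now().Add(15 * time.Minute),
	})
	if err != nil {
		// Handle err.
	}
	_ = policy.URL // POST target; policy.Fields carries the form fields.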
12	vendor/cloud.google.com/go/storage/reader.go	generated vendored
@@ -163,7 +163,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
	}

	var res *http.Response
	err = runWithRetry(ctx, func() error {
	err = run(ctx, func() error {
		res, err = o.c.hc.Do(req)
		if err != nil {
			return err
@@ -210,7 +210,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
			gen = gen64
		}
		return nil
	})
	}, o.retry, true)
	if err != nil {
		return nil, err
	}
@@ -483,7 +483,7 @@ func (o *ObjectHandle) newRangeReaderWithGRPC(ctx context.Context, offset, lengt
	var msg *storagepb.ReadObjectResponse
	var err error

	err = runWithRetry(cc, func() error {
	err = run(cc, func() error {
		stream, err = o.c.gc.ReadObject(cc, req)
		if err != nil {
			return err
@@ -492,7 +492,7 @@ func (o *ObjectHandle) newRangeReaderWithGRPC(ctx context.Context, offset, lengt
		msg, err = stream.Recv()

		return err
	})
	}, o.retry, true)
	if err != nil {
		// Close the stream context we just created to ensure we don't leak
		// resources.
@@ -541,8 +541,8 @@ func (o *ObjectHandle) newRangeReaderWithGRPC(ctx context.Context, offset, lengt
	}

	// Only support checksums when reading an entire object, not a range.
	if msg.GetObjectChecksums().Crc32C != nil && offset == 0 && length == 0 {
	if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && offset == 0 && length == 0 {
		r.wantCRC = msg.GetObjectChecksums().GetCrc32C()
		r.wantCRC = checksums.GetCrc32C()
		r.checkCRC = true
	}
224	vendor/cloud.google.com/go/storage/storage.go	generated vendored
@@ -41,6 +41,7 @@ import (
	"cloud.google.com/go/internal/trace"
	"cloud.google.com/go/internal/version"
	gapic "cloud.google.com/go/storage/internal/apiv2"
	"github.com/googleapis/gax-go/v2"
	"golang.org/x/oauth2/google"
	"golang.org/x/xerrors"
	"google.golang.org/api/googleapi"
@@ -50,6 +51,7 @@ import (
	"google.golang.org/api/transport"
	htransport "google.golang.org/api/transport/http"
	storagepb "google.golang.org/genproto/googleapis/storage/v2"
	"google.golang.org/grpc"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/timestamppb"
@@ -81,6 +83,12 @@ const (
	// ScopeReadWrite grants permissions to manage your
	// data in Google Cloud Storage.
	ScopeReadWrite = raw.DevstorageReadWriteScope

	// defaultConnPoolSize is the default number of connections
	// to initialize in the GAPIC gRPC connection pool. A larger
	// connection pool may be necessary for jobs that require
	// high throughput and/or leverage many concurrent streams.
	defaultConnPoolSize = 4
)

var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)
@@ -102,6 +110,7 @@ type Client struct {
	readHost string
	// May be nil.
	creds *google.Credentials
	retry *retryConfig

	// gc is an optional gRPC-based, GAPIC client.
	//
@@ -203,11 +212,34 @@ func newHybridClient(ctx context.Context, opts *hybridClientOptions) (*Client, e
	if opts == nil {
		opts = &hybridClientOptions{}
	}
	opts.GRPCOpts = append(defaultGRPCOptions(), opts.GRPCOpts...)

	c, err := NewClient(ctx, opts.HTTPOpts...)
	if err != nil {
		return nil, err
	}

	// Set emulator options for gRPC if an emulator was specified. Note that in a
	// hybrid client, STORAGE_EMULATOR_HOST will set the host to use for HTTP and
	// STORAGE_EMULATOR_HOST_GRPC will set the host to use for gRPC (when using a
	// local emulator, HTTP and gRPC must use different ports, so this is
	// necessary).
	// TODO: when full gRPC client is available, remove STORAGE_EMULATOR_HOST_GRPC
	// and use STORAGE_EMULATOR_HOST for both the HTTP and gRPC based clients.
	if host := os.Getenv("STORAGE_EMULATOR_HOST_GRPC"); host != "" {
		// Strip the scheme from the emulator host. WithEndpoint does not take a
		// scheme for gRPC.
		if strings.Contains(host, "://") {
			host = strings.SplitN(host, "://", 2)[1]
		}

		opts.GRPCOpts = append(opts.GRPCOpts,
			option.WithEndpoint(host),
			option.WithGRPCDialOption(grpc.WithInsecure()),
			option.WithoutAuthentication(),
		)
	}

	g, err := gapic.NewClient(ctx, opts.GRPCOpts...)
	if err != nil {
		return nil, err
@@ -217,6 +249,14 @@ func newHybridClient(ctx context.Context, opts *hybridClientOptions) (*Client, e
	return c, nil
}

// defaultGRPCOptions returns a set of the default client options
// for gRPC client initialization.
func defaultGRPCOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithGRPCConnectionPool(defaultConnPoolSize),
	}
}

// Close closes the Client.
//
// Close need not be called at program exit.
@@ -836,6 +876,7 @@ type ObjectHandle struct {
	encryptionKey  []byte // AES-256 key
	userProject    string // for requester-pays buckets
	readCompressed bool   // Accept-Encoding: gzip
	retry          *retryConfig
}

// ACL provides access to the object's access control list.
@@ -899,7 +940,7 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error
	}
	var obj *raw.Object
	setClientHeader(call.Header())
	err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
	err = run(ctx, func() error { obj, err = call.Do(); return err }, o.retry, true)
	var e *googleapi.Error
	if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
		return nil, ErrObjectNotExist
@@ -1000,7 +1041,11 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
	}
	var obj *raw.Object
	setClientHeader(call.Header())
	err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
	var isIdempotent bool
	if o.conds != nil && o.conds.MetagenerationMatch != 0 {
		isIdempotent = true
	}
	err = run(ctx, func() error { obj, err = call.Do(); return err }, o.retry, isIdempotent)
	var e *googleapi.Error
	if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
		return nil, ErrObjectNotExist
@@ -1064,7 +1109,13 @@ func (o *ObjectHandle) Delete(ctx context.Context) error {
	}
	// Encryption doesn't apply to Delete.
	setClientHeader(call.Header())
	err := runWithRetry(ctx, func() error { return call.Do() })
	var isIdempotent bool
	// Delete is idempotent if GenerationMatch or Generation have been passed in.
	// The default generation is negative to get the latest version of the object.
	if (o.conds != nil && o.conds.GenerationMatch != 0) || o.gen >= 0 {
		isIdempotent = true
	}
	err := run(ctx, func() error { return call.Do() }, o.retry, isIdempotent)
	var e *googleapi.Error
	if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
		return ErrObjectNotExist
@@ -1759,6 +1810,169 @@ func setConditionField(call reflect.Value, name string, value interface{}) bool
	return true
}

// Retryer returns an object handle that is configured with custom retry
// behavior as specified by the options that are passed to it. All operations
// on the new handle will use the customized retry configuration.
// These retry options will merge with the bucket's retryer (if set) for the
// returned handle. Options passed into this method will take precedence over
// retry options on the bucket and client. Note that you must explicitly pass in
// each option you want to override.
func (o *ObjectHandle) Retryer(opts ...RetryOption) *ObjectHandle {
	o2 := *o
	var retry *retryConfig
	if o.retry != nil {
		// merge the options with the existing retry
		retry = o.retry
	} else {
		retry = &retryConfig{}
	}
	for _, opt := range opts {
		opt.apply(retry)
	}
	o2.retry = retry
	o2.acl.retry = retry
	return &o2
}

// SetRetry configures the client with custom retry behavior as specified by the
// options that are passed to it. All operations using this client will use the
// customized retry configuration.
// This should be called once before using the client for network operations, as
// there could be indeterminate behaviour with operations in progress.
// Retry options set on a bucket or object handle will take precedence over
// these options.
func (c *Client) SetRetry(opts ...RetryOption) {
	var retry *retryConfig
	if c.retry != nil {
		// merge the options with the existing retry
		retry = c.retry
	} else {
		retry = &retryConfig{}
	}
	for _, opt := range opts {
		opt.apply(retry)
	}
	c.retry = retry
}

// RetryOption allows users to configure non-default retry behavior for API
// calls made to GCS.
type RetryOption interface {
	apply(config *retryConfig)
}

// WithBackoff allows configuration of the backoff timing used for retries.
// Available configuration options (Initial, Max and Multiplier) are described
// at https://pkg.go.dev/github.com/googleapis/gax-go/v2#Backoff. If any fields
// are not supplied by the user, gax default values will be used.
func WithBackoff(backoff gax.Backoff) RetryOption {
	return &withBackoff{
		backoff: backoff,
	}
}

type withBackoff struct {
	backoff gax.Backoff
}

func (wb *withBackoff) apply(config *retryConfig) {
	config.backoff = &wb.backoff
}

// RetryPolicy describes the available policies for which operations should be
// retried. The default is `RetryIdempotent`.
type RetryPolicy int

const (
	// RetryIdempotent causes only idempotent operations to be retried when the
	// service returns a transient error. Using this policy, fully idempotent
	// operations (such as `ObjectHandle.Attrs()`) will always be retried.
	// Conditionally idempotent operations (for example `ObjectHandle.Update()`)
	// will be retried only if the necessary conditions have been supplied (in
	// the case of `ObjectHandle.Update()` this would mean supplying a
	// `Conditions.MetagenerationMatch` condition is required).
	RetryIdempotent RetryPolicy = iota

	// RetryAlways causes all operations to be retried when the service returns a
	// transient error, regardless of idempotency considerations.
	RetryAlways

	// RetryNever causes the client to not perform retries on failed operations.
	RetryNever
)

// WithPolicy allows the configuration of which operations should be performed
// with retries for transient errors.
func WithPolicy(policy RetryPolicy) RetryOption {
	return &withPolicy{
		policy: policy,
	}
}

type withPolicy struct {
	policy RetryPolicy
}

func (ws *withPolicy) apply(config *retryConfig) {
	config.policy = ws.policy
}

// WithErrorFunc allows users to pass a custom function to the retryer. Errors
// will be retried if and only if `shouldRetry(err)` returns true.
// By default, the following errors are retried (see invoke.go for the default
// shouldRetry function):
//
// - HTTP responses with codes 408, 429, 502, 503, and 504.
//
// - Transient network errors such as connection reset and io.ErrUnexpectedEOF.
//
// - Errors which are considered transient using the Temporary() interface.
//
// - Wrapped versions of these errors.
//
// This option can be used to retry on a different set of errors than the
// default.
func WithErrorFunc(shouldRetry func(err error) bool) RetryOption {
	return &withErrorFunc{
		shouldRetry: shouldRetry,
	}
}

type withErrorFunc struct {
	shouldRetry func(err error) bool
}

func (wef *withErrorFunc) apply(config *retryConfig) {
	config.shouldRetry = wef.shouldRetry
}

type retryConfig struct {
	backoff     *gax.Backoff
	policy      RetryPolicy
	shouldRetry func(err error) bool
}

func (r *retryConfig) clone() *retryConfig {
	if r == nil {
		return nil
	}

	var bo *gax.Backoff
	if r.backoff != nil {
		bo = &gax.Backoff{
			Initial:    r.backoff.Initial,
			Max:        r.backoff.Max,
			Multiplier: r.backoff.Multiplier,
		}
	}

	return &retryConfig{
		backoff:     bo,
		policy:      r.policy,
		shouldRetry: r.shouldRetry,
	}
}

// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods
// that modifyCall searches for by name.
type composeSourceObj struct {
@@ -1802,10 +2016,10 @@ func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string,
	r := c.raw.Projects.ServiceAccount.Get(projectID)
	var res *raw.ServiceAccount
	var err error
	err = runWithRetry(ctx, func() error {
	err = run(ctx, func() error {
		res, err = r.Context(ctx).Do()
		return err
	})
	}, c.retry, true)
	if err != nil {
		return "", err
	}
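A short usage sketch of combining these options, assuming an existing client and the gRPC status/codes packages; WithErrorFunc replaces the default predicate entirely, so the custom function must enumerate every error it wants retried (retryUnavailableToo is an illustrative local name):

	// Retry every operation, but only on gRPC Unavailable errors.
	retryUnavailableToo := func(err error) bool {
		if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable {
			return true
		}
		// Add any other retryable cases here; nothing else is retried.
		return false
	}
	client.SetRetry(
		storage.WithPolicy(storage.RetryAlways),
		storage.WithErrorFunc(retryUnavailableToo),
	)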
16	vendor/cloud.google.com/go/storage/writer.go	generated vendored
@@ -172,6 +172,22 @@ func (w *Writer) open() error {
		// call to set up the upload as well as calls to upload individual chunks
		// for a resumable upload (as long as the chunk size is non-zero). Hence
		// there is no need to add retries here.

		// Retry only when the operation is idempotent or the retry policy is RetryAlways.
		isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true)
		var useRetry bool
		if (w.o.retry == nil || w.o.retry.policy == RetryIdempotent) && isIdempotent {
			useRetry = true
		} else if w.o.retry != nil && w.o.retry.policy == RetryAlways {
			useRetry = true
		}
		if useRetry {
			if w.o.retry != nil {
				call.WithRetry(w.o.retry.backoff, w.o.retry.shouldRetry)
			} else {
				call.WithRetry(nil, nil)
			}
		}
		resp, err = call.Do()
	}
	if err != nil {
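An upload becomes eligible for retries only when a precondition makes it idempotent. A minimal caller-side sketch, assuming an existing client and "my-bucket" / "report.csv" as placeholders:

	// DoesNotExist makes the resumable upload idempotent, so the setup and
	// chunk calls above may be retried safely.
	w := client.Bucket("my-bucket").Object("report.csv").
		If(storage.Conditions{DoesNotExist: true}).
		NewWriter(ctx)
	if _, err := w.Write([]byte("col_a,col_b\n")); err != nil {
		// Handle err.
	}
	if err := w.Close(); err != nil {
		// Handle err (a precondition failure means the object already exists).
	}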
9	vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go	generated vendored
@@ -9659,6 +9659,9 @@ var awsPartition = partition{
			endpointKey{
				Region: "ap-southeast-2",
			}: endpoint{},
			endpointKey{
				Region: "ca-central-1",
			}: endpoint{},
			endpointKey{
				Region: "eu-central-1",
			}: endpoint{},
@@ -10808,6 +10811,9 @@ var awsPartition = partition{
		},
		"kafka": service{
			Endpoints: serviceEndpoints{
				endpointKey{
					Region: "af-south-1",
				}: endpoint{},
				endpointKey{
					Region: "ap-east-1",
				}: endpoint{},
@@ -10817,6 +10823,9 @@ var awsPartition = partition{
				endpointKey{
					Region: "ap-northeast-2",
				}: endpoint{},
				endpointKey{
					Region: "ap-northeast-3",
				}: endpoint{},
				endpointKey{
					Region: "ap-south-1",
				}: endpoint{},
2	vendor/github.com/aws/aws-sdk-go/aws/version.go	generated vendored
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.42.39"
const SDKVersion = "1.42.42"
62	vendor/github.com/klauspost/compress/flate/deflate.go	generated vendored
@@ -10,9 +10,6 @@ import (
	"fmt"
	"io"
	"math"
	"math/bits"

	comp "github.com/klauspost/compress"
)

const (
@@ -76,8 +73,8 @@ var levels = []compressionLevel{
	{0, 0, 0, 0, 0, 6},
	// Levels 7-9 use increasingly more lazy matching
	// and increasingly stringent conditions for "good enough".
	{6, 10, 12, 16, skipNever, 7},
	{8, 12, 16, 24, skipNever, 7},
	{10, 24, 32, 64, skipNever, 8},
	{16, 30, 40, 64, skipNever, 8},
	{32, 258, 258, 1024, skipNever, 9},
}
@@ -110,6 +107,7 @@ type advancedState struct {
type compressor struct {
	compressionLevel

	h *huffmanEncoder
	w *huffmanBitWriter

	// compression algorithm
@@ -271,7 +269,7 @@ func (d *compressor) fillWindow(b []byte) {
// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
func (d *compressor) findMatch(pos int, prevHead int, lookahead, bpb int) (length, offset int, ok bool) {
func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) {
	minMatchLook := maxMatchLength
	if lookahead < minMatchLook {
		minMatchLook = lookahead
@@ -297,14 +295,46 @@ func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (lengt
	}
	offset = 0

	cGain := 0
	if d.chain < 100 {
		for i := prevHead; tries > 0; tries-- {
			if wEnd == win[i+length] {
				n := matchLen(win[i:i+minMatchLook], wPos)
				if n > length {
					length = n
					offset = pos - i
					ok = true
					if n >= nice {
						// The match is good enough that we don't try to find a better one.
						break
					}
					wEnd = win[pos+n]
				}
			}
			if i <= minIndex {
				// hashPrev[i & windowMask] has already been overwritten, so stop now.
				break
			}
			i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
			if i < minIndex {
				break
			}
		}
		return
	}

	// Some like it higher (CSV), some like it lower (JSON)
	const baseCost = 6
	// Base is 4 bytes at with an additional cost.
	// Matches must be better than this.
	cGain := minMatchLength*bpb - 12

	for i := prevHead; tries > 0; tries-- {
		if wEnd == win[i+length] {
			n := matchLen(win[i:i+minMatchLook], wPos)
			if n > length {
				newGain := n*bpb - bits.Len32(uint32(pos-i))
				// Calculate gain. Estimate
				newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]])

				//fmt.Println(n, "gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]))
				if newGain > cGain {
					length = n
					offset = pos - i
@@ -389,10 +419,16 @@ func (d *compressor) deflateLazy() {
	if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
		return
	}
	s.estBitsPerByte = 8
	if !d.sync {
		s.estBitsPerByte = comp.ShannonEntropyBits(d.window[s.index:d.windowEnd])
		s.estBitsPerByte = int(1 + float64(s.estBitsPerByte)/float64(d.windowEnd-s.index))
	if d.windowEnd != s.index && d.chain > 100 {
		// Get literal huffman coder.
		if d.h == nil {
			d.h = newHuffmanEncoder(maxFlateBlockTokens)
		}
		var tmp [256]uint16
		for _, v := range d.window[s.index:d.windowEnd] {
			tmp[v]++
		}
		d.h.generate(tmp[:], 15)
	}

	s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
@@ -446,7 +482,7 @@ func (d *compressor) deflateLazy() {
	}

	if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
		if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead, s.estBitsPerByte); ok {
		if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok {
			s.length = newLength
			s.offset = newOffset
		}
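These changes adjust how levels 7 and 8 weigh match candidates; the public compression API is unchanged. A minimal sketch of exercising one of the affected levels, assuming the vendored klauspost/compress/flate package and bytes from the standard library:

	// Compress a buffer at level 7, one of the levels whose match-search
	// parameters changed in this update.
	var buf bytes.Buffer
	zw, err := flate.NewWriter(&buf, 7)
	if err != nil {
		// Handle err.
	}
	if _, err := zw.Write([]byte("example payload, example payload")); err != nil {
		// Handle err.
	}
	if err := zw.Close(); err != nil {
		// Handle err.
	}
	// buf now holds the DEFLATE stream.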
10	vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go	generated vendored
@@ -52,18 +52,18 @@ var lengthBase = [32]uint8{
|
||||||
}
|
}
|
||||||
|
|
||||||
// offset code word extra bits.
|
// offset code word extra bits.
|
||||||
var offsetExtraBits = [64]int8{
|
var offsetExtraBits = [32]int8{
|
||||||
0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
|
0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
|
||||||
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
|
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
|
||||||
9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
|
9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
|
||||||
/* extended window */
|
/* extended window */
|
||||||
14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
|
14, 14,
|
||||||
}
|
}
|
||||||
|
|
||||||
var offsetCombined = [32]uint32{}
|
var offsetCombined = [32]uint32{}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
var offsetBase = [64]uint32{
|
var offsetBase = [32]uint32{
|
||||||
/* normal deflate */
|
/* normal deflate */
|
||||||
0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
|
0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
|
||||||
0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
|
0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
|
||||||
|
@ -73,9 +73,7 @@ func init() {
|
||||||
0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
|
0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
|
||||||
|
|
||||||
/* extended window */
|
/* extended window */
|
||||||
0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
|
0x008000, 0x00c000,
|
||||||
0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
|
|
||||||
0x100000, 0x180000, 0x200000, 0x300000,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := range offsetCombined[:] {
|
for i := range offsetCombined[:] {
|
||||||
|
|
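The tables above shrink to 32 entries because only the standard 32 KiB deflate window remains; the dropped entries covered an extended window. For reference, the standard DEFLATE offset codes and their extra-bit counts can be derived as in the sketch below. This is an illustrative re-derivation, not the library's lookup tables.

package main

import (
	"fmt"
	"math/bits"
)

// offsetCode returns the standard DEFLATE offset code (0..29) for a match
// offset in 1..32768, plus the number of extra bits that code carries.
func offsetCode(offset uint32) (code, extraBits uint32) {
	if offset <= 2 {
		return offset - 1, 0
	}
	n := uint32(bits.Len32(offset - 1)) // highest set bit position of offset-1
	extraBits = n - 2
	code = 2*n - 2
	if (offset-1)&(1<<(n-1)-1) >= 1<<(n-2) {
		code++ // upper half of the range gets the odd code
	}
	return code, extraBits
}

func main() {
	for _, off := range []uint32{1, 4, 24, 32768} {
		c, e := offsetCode(off)
		fmt.Printf("offset %5d -> code %2d, extra bits %2d\n", off, c, e)
	}
}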
4 vendor/github.com/klauspost/compress/flate/huffman_code.go (generated, vendored)

@@ -129,9 +129,7 @@ func (h *huffmanEncoder) bitLength(freq []uint16) int {
 func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
 	var total int
 	for _, f := range b {
-		if f != 0 {
-			total += int(h.codes[f].len)
-		}
+		total += int(h.codes[f].len)
 	}
 	return total
 }
19 vendor/github.com/klauspost/compress/flate/token.go (generated, vendored)

@@ -129,11 +129,11 @@ var offsetCodes14 = [256]uint32{
 type token uint32

 type tokens struct {
-	nLits     int
 	extraHist [32]uint16  // codes 256->maxnumlit
 	offHist   [32]uint16  // offset codes
 	litHist   [256]uint16 // codes 0->255
-	n         uint16 // Must be able to contain maxStoreBlockSize
+	nFilled   int
+	n         uint16 // Must be able to contain maxStoreBlockSize
 	tokens    [maxStoreBlockSize + 1]token
 }

@@ -142,7 +142,7 @@ func (t *tokens) Reset() {
 		return
 	}
 	t.n = 0
-	t.nLits = 0
+	t.nFilled = 0
 	for i := range t.litHist[:] {
 		t.litHist[i] = 0
 	}

@@ -161,12 +161,12 @@ func (t *tokens) Fill() {
 	for i, v := range t.litHist[:] {
 		if v == 0 {
 			t.litHist[i] = 1
-			t.nLits++
+			t.nFilled++
 		}
 	}
 	for i, v := range t.extraHist[:literalCount-256] {
 		if v == 0 {
-			t.nLits++
+			t.nFilled++
 			t.extraHist[i] = 1
 		}
 	}

@@ -202,14 +202,12 @@ func emitLiteral(dst *tokens, lit []byte) {
 		dst.litHist[v]++
 	}
 	dst.n += uint16(len(lit))
-	dst.nLits += len(lit)
 }

 func (t *tokens) AddLiteral(lit byte) {
 	t.tokens[t.n] = token(lit)
 	t.litHist[lit]++
 	t.n++
-	t.nLits++
 }

 // from https://stackoverflow.com/a/28730362

@@ -230,8 +228,9 @@ func (t *tokens) EstimatedBits() int {
 	shannon := float32(0)
 	bits := int(0)
 	nMatches := 0
-	if t.nLits > 0 {
-		invTotal := 1.0 / float32(t.nLits)
+	total := int(t.n) + t.nFilled
+	if total > 0 {
+		invTotal := 1.0 / float32(total)
 		for _, v := range t.litHist[:] {
 			if v > 0 {
 				n := float32(v)

@@ -275,7 +274,6 @@ func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
 	}
 	oCode := offsetCode(xoffset)
 	xoffset |= oCode << 16
-	t.nLits++

 	t.extraHist[lengthCodes1[uint8(xlength)]]++
 	t.offHist[oCode]++

@@ -301,7 +299,6 @@ func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
 	}
 	xlength -= xl
 	xl -= baseMatchLength
-	t.nLits++
 	t.extraHist[lengthCodes1[uint8(xl)]]++
 	t.offHist[oc]++
 	t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
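With the nLits counter gone, the entropy estimate above normalizes the literal histogram by the number of stored tokens (t.n) plus the histogram slots that Fill() padded with a count of one (nFilled). A small sketch of that normalization; the names and the plain Shannon formula are illustrative, not the package's exact arithmetic.

package main

import (
	"fmt"
	"math"
)

// estimatedBits mirrors the shape of the change above: divide by the real
// token count plus the artificially filled slots instead of a separate nLits.
func estimatedBits(litHist []uint16, n, nFilled int) int {
	total := n + nFilled
	if total == 0 {
		return 0
	}
	invTotal := 1.0 / float64(total)
	bitsEst := 0.0
	for _, v := range litHist {
		if v > 0 {
			p := float64(v) * invTotal
			bitsEst += -math.Log2(p) * float64(v)
		}
	}
	return int(bitsEst)
}

func main() {
	hist := make([]uint16, 256)
	data := []byte("hello, hello, hello")
	for _, b := range data {
		hist[b]++
	}
	fmt.Println("estimated bits:", estimatedBits(hist, len(data), 0))
}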
24 vendor/github.com/klauspost/compress/zstd/blockdec.go (generated, vendored)

@@ -76,12 +76,11 @@ type blockDec struct {
 	// Window size of the block.
 	WindowSize uint64

 	history chan *history
 	input   chan struct{}
 	result  chan decodeOutput
-	sequenceBuf []seq
 	err     error
 	decWG   sync.WaitGroup

 	// Frame to use for singlethreaded decoding.
 	// Should not be used by the decoder itself since parent may be another frame.

@@ -512,18 +511,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 		nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
 		in = in[3:]
 	}
-	// Allocate sequences
-	if cap(b.sequenceBuf) < nSeqs {
-		if b.lowMem {
-			b.sequenceBuf = make([]seq, nSeqs)
-		} else {
-			// Allocate max
-			b.sequenceBuf = make([]seq, nSeqs, maxSequences)
-		}
-	} else {
-		// Reuse buffer
-		b.sequenceBuf = b.sequenceBuf[:nSeqs]
-	}
 	var seqs = &sequenceDecs{}
 	if nSeqs > 0 {
 		if len(in) < 1 {
98 vendor/github.com/klauspost/compress/zstd/decodeheader.go (generated, vendored)

@@ -5,6 +5,7 @@ package zstd

 import (
 	"bytes"
+	"encoding/binary"
 	"errors"
 	"io"
 )

@@ -15,18 +16,50 @@ const HeaderMaxSize = 14 + 3

 // Header contains information about the first frame and block within that.
 type Header struct {
-	// Window Size the window of data to keep while decoding.
-	// Will only be set if HasFCS is false.
-	WindowSize uint64
+	// SingleSegment specifies whether the data is to be decompressed into a
+	// single contiguous memory segment.
+	// It implies that WindowSize is invalid and that FrameContentSize is valid.
+	SingleSegment bool

-	// Frame content size.
-	// Expected size of the entire frame.
-	FrameContentSize uint64
+	// WindowSize is the window of data to keep while decoding.
+	// Will only be set if SingleSegment is false.
+	WindowSize uint64

 	// Dictionary ID.
 	// If 0, no dictionary.
 	DictionaryID uint32

+	// HasFCS specifies whether FrameContentSize has a valid value.
+	HasFCS bool
+
+	// FrameContentSize is the expected uncompressed size of the entire frame.
+	FrameContentSize uint64
+
+	// Skippable will be true if the frame is meant to be skipped.
+	// This implies that FirstBlock.OK is false.
+	Skippable bool
+
+	// SkippableID is the user-specific ID for the skippable frame.
+	// Valid values are between 0 to 15, inclusive.
+	SkippableID int
+
+	// SkippableSize is the length of the user data to skip following
+	// the header.
+	SkippableSize uint32
+
+	// HeaderSize is the raw size of the frame header.
+	//
+	// For normal frames, it includes the size of the magic number and
+	// the size of the header (per section 3.1.1.1).
+	// It does not include the size for any data blocks (section 3.1.1.2) nor
+	// the size for the trailing content checksum.
+	//
+	// For skippable frames, this counts the size of the magic number
+	// along with the size of the size field of the payload.
+	// It does not include the size of the skippable payload itself.
+	// The total frame size is the HeaderSize plus the SkippableSize.
+	HeaderSize int
+
 	// First block information.
 	FirstBlock struct {
 		// OK will be set if first block could be decoded.

@@ -51,17 +84,9 @@ type Header struct {
 		CompressedSize int
 	}

-	// Skippable will be true if the frame is meant to be skipped.
-	// No other information will be populated.
-	Skippable bool
-
 	// If set there is a checksum present for the block content.
+	// The checksum field at the end is always 4 bytes long.
 	HasCheckSum bool
-
-	// If this is true FrameContentSize will have a valid value
-	HasFCS bool
-
-	SingleSegment bool
 }

 // Decode the header from the beginning of the stream.

@@ -71,39 +96,46 @@ type Header struct {
 // If there isn't enough input, io.ErrUnexpectedEOF is returned.
 // The FirstBlock.OK will indicate if enough information was available to decode the first block header.
 func (h *Header) Decode(in []byte) error {
+	*h = Header{}
 	if len(in) < 4 {
 		return io.ErrUnexpectedEOF
 	}
+	h.HeaderSize += 4
 	b, in := in[:4], in[4:]
 	if !bytes.Equal(b, frameMagic) {
 		if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
 			return ErrMagicMismatch
 		}
-		*h = Header{Skippable: true}
+		if len(in) < 4 {
+			return io.ErrUnexpectedEOF
+		}
+		h.HeaderSize += 4
+		h.Skippable = true
+		h.SkippableID = int(b[0] & 0xf)
+		h.SkippableSize = binary.LittleEndian.Uint32(in)
 		return nil
 	}
-	if len(in) < 1 {
-		return io.ErrUnexpectedEOF
-	}
-
-	// Clear output
-	*h = Header{}
-	fhd, in := in[0], in[1:]
-	h.SingleSegment = fhd&(1<<5) != 0
-	h.HasCheckSum = fhd&(1<<2) != 0
-
-	if fhd&(1<<3) != 0 {
-		return errors.New("reserved bit set on frame header")
-	}

 	// Read Window_Descriptor
 	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
+	if len(in) < 1 {
+		return io.ErrUnexpectedEOF
+	}
+	fhd, in := in[0], in[1:]
+	h.HeaderSize++
+	h.SingleSegment = fhd&(1<<5) != 0
+	h.HasCheckSum = fhd&(1<<2) != 0
+	if fhd&(1<<3) != 0 {
+		return errors.New("reserved bit set on frame header")
+	}

 	if !h.SingleSegment {
 		if len(in) < 1 {
 			return io.ErrUnexpectedEOF
 		}
 		var wd byte
 		wd, in = in[0], in[1:]
+		h.HeaderSize++
 		windowLog := 10 + (wd >> 3)
 		windowBase := uint64(1) << windowLog
 		windowAdd := (windowBase / 8) * uint64(wd&0x7)

@@ -120,9 +152,7 @@ func (h *Header) Decode(in []byte) error {
 			return io.ErrUnexpectedEOF
 		}
 		b, in = in[:size], in[size:]
-		if b == nil {
-			return io.ErrUnexpectedEOF
-		}
+		h.HeaderSize += int(size)
 		switch size {
 		case 1:
 			h.DictionaryID = uint32(b[0])

@@ -152,9 +182,7 @@ func (h *Header) Decode(in []byte) error {
 			return io.ErrUnexpectedEOF
 		}
 		b, in = in[:fcsSize], in[fcsSize:]
-		if b == nil {
-			return io.ErrUnexpectedEOF
-		}
+		h.HeaderSize += int(fcsSize)
 		switch fcsSize {
 		case 1:
 			h.FrameContentSize = uint64(b[0])
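With the new fields, zstd.Header.Decode reports header sizes and skippable-frame metadata directly. A hedged usage sketch follows; the byte values are a hand-built example frame prefix, and a real caller would read up to zstd.HeaderMaxSize bytes from its input before decoding.

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Magic number, frame header descriptor, and window descriptor of a
	// plausible zstd frame; only enough bytes for the fields read below.
	frame := []byte{0x28, 0xb5, 0x2f, 0xfd, 0x04, 0x58, 0x00, 0x00}

	var h zstd.Header
	if err := h.Decode(frame); err != nil {
		fmt.Println("not enough data or not a zstd frame:", err)
		return
	}
	if h.Skippable {
		// Skip HeaderSize + SkippableSize bytes to reach the next frame.
		fmt.Println("skippable frame, payload bytes:", h.SkippableSize)
		return
	}
	fmt.Println("header bytes:", h.HeaderSize,
		"single segment:", h.SingleSegment,
		"has FCS:", h.HasFCS,
		"window:", h.WindowSize)
}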
10 vendor/github.com/klauspost/compress/zstd/encoder_options.go (generated, vendored)

@@ -24,6 +24,7 @@ type encoderOptions struct {
 	allLitEntropy   bool
 	customWindow    bool
 	customALEntropy bool
+	customBlockSize bool
 	lowMem          bool
 	dict            *dict
 }

@@ -33,7 +34,7 @@ func (o *encoderOptions) setDefault() {
 		concurrent:    runtime.GOMAXPROCS(0),
 		crc:           true,
 		single:        nil,
-		blockSize:     1 << 16,
+		blockSize:     maxCompressedBlockSize,
 		windowSize:    8 << 20,
 		level:         SpeedDefault,
 		allLitEntropy: true,

@@ -106,6 +107,7 @@ func WithWindowSize(n int) EOption {
 		o.customWindow = true
 		if o.blockSize > o.windowSize {
 			o.blockSize = o.windowSize
+			o.customBlockSize = true
 		}
 		return nil
 	}

@@ -188,10 +190,9 @@ func EncoderLevelFromZstd(level int) EncoderLevel {
 		return SpeedDefault
 	case level >= 6 && level < 10:
 		return SpeedBetterCompression
-	case level >= 10:
+	default:
 		return SpeedBestCompression
 	}
-	return SpeedDefault
 }

 // String provides a string representation of the compression level.

@@ -222,6 +223,9 @@ func WithEncoderLevel(l EncoderLevel) EOption {
 		switch o.level {
 		case SpeedFastest:
 			o.windowSize = 4 << 20
+			if !o.customBlockSize {
+				o.blockSize = 1 << 16
+			}
 		case SpeedDefault:
 			o.windowSize = 8 << 20
 		case SpeedBetterCompression:
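These options interact: WithWindowSize now records a custom block size so a later WithEncoderLevel cannot shrink it back to 64K, and EncoderLevelFromZstd maps any level of 10 or above (the default branch) to SpeedBestCompression. A usage sketch against the public API, assuming github.com/klauspost/compress/zstd at the version in this update:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Map a zstd CLI-style numeric level to one of the library's named levels.
	level := zstd.EncoderLevelFromZstd(11) // >= 10 now maps to SpeedBestCompression

	var buf bytes.Buffer
	// The window size option is applied before the level option, so the
	// block size it fixes will not be overridden by the level defaults.
	enc, err := zstd.NewWriter(&buf,
		zstd.WithWindowSize(1<<20),
		zstd.WithEncoderLevel(level),
	)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("some payload")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}
	fmt.Println("level:", level.String(), "compressed bytes:", buf.Len())
}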
1 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s (generated, vendored)

@@ -1,6 +1,7 @@
 // +build !appengine
 // +build gc
 // +build !purego
+// +build !noasm

 #include "textflag.h"
81 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s (generated, vendored)

@@ -1,13 +1,13 @@
-// +build gc,!purego
+// +build gc,!purego,!noasm

 #include "textflag.h"

 // Register allocation.
 #define digest R1
 #define h R2 // Return value.
 #define p R3 // Input pointer.
 #define len R4
 #define nblocks R5 // len / 32.
 #define prime1 R7
 #define prime2 R8
 #define prime3 R9

@@ -22,50 +22,48 @@
 #define x3 R22
 #define x4 R23

 #define round(acc, x) \
 	MADD prime2, acc, x, acc \
 	ROR  $64-31, acc \
 	MUL  prime1, acc \

 // x = round(0, x).
 #define round0(x) \
 	MUL prime2, x \
 	ROR $64-31, x \
 	MUL prime1, x \

 #define mergeRound(x) \
 	round0(x) \
 	EOR  x, h \
 	MADD h, prime4, prime1, h \

 // Update v[1-4] with 32-byte blocks. Assumes len >= 32.
 #define blocksLoop() \
 	LSR     $5, len, nblocks \
 	PCALIGN $16 \
 loop: \
 	LDP.P 32(p), (x1, x2) \
 	round(v1, x1) \
 	LDP -16(p), (x3, x4) \
 	round(v2, x2) \
 	SUB $1, nblocks \
 	round(v3, x3) \
 	round(v4, x4) \
 	CBNZ nblocks, loop \

 // The primes are repeated here to ensure that they're stored
 // in a contiguous array, so we can load them with LDP.
 DATA primes<> +0(SB)/8, $11400714785074694791
 DATA primes<> +8(SB)/8, $14029467366897019727
 DATA primes<>+16(SB)/8, $1609587929392839161
 DATA primes<>+24(SB)/8, $9650029242287828579
 DATA primes<>+32(SB)/8, $2870177450012600261
 GLOBL primes<>(SB), NOPTR+RODATA, $40

 // func Sum64(b []byte) uint64
 TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
 	LDP b_base+0(FP), (p, len)

 	LDP primes<> +0(SB), (prime1, prime2)
 	LDP primes<>+16(SB), (prime3, prime4)

@@ -156,24 +154,23 @@ try1:

 end:
 	EOR h >> 33, h
 	MUL prime2, h
 	EOR h >> 29, h
 	MUL prime3, h
 	EOR h >> 32, h

 	MOVD h, ret+24(FP)
 	RET

 // func writeBlocks(d *Digest, b []byte) int
 //
 // Assumes len(b) >= 32.
 TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
 	LDP primes<>(SB), (prime1, prime2)

 	// Load state. Assume v[1-4] are stored contiguously.
 	MOVD d+0(FP), digest
 	LDP 0(digest), (v1, v2)
 	LDP 16(digest), (v3, v4)

 	LDP b_base+8(FP), (p, len)

@@ -181,7 +178,7 @@ TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
 	blocksLoop()

 	// Store updated state.
 	STP (v1, v2), 0(digest)
 	STP (v3, v4), 16(digest)

 	BIC $31, len
3 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go (generated, vendored)

@@ -1,8 +1,9 @@
-//go:build (amd64 || arm64) && !appengine && gc && !purego
+//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm
 // +build amd64 arm64
 // +build !appengine
 // +build gc
 // +build !purego
+// +build !noasm

 package xxhash
|
4
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
generated
vendored
4
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
generated
vendored
|
@ -1,5 +1,5 @@
|
||||||
//go:build (!amd64 && !arm64) || appengine || !gc || purego
|
//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm
|
||||||
// +build !amd64,!arm64 appengine !gc purego
|
// +build !amd64,!arm64 appengine !gc purego noasm
|
||||||
|
|
||||||
package xxhash
|
package xxhash
|
||||||
|
|
||||||
|
|
2268 vendor/google.golang.org/genproto/googleapis/storage/v2/storage.pb.go (generated, vendored)
File diff suppressed because it is too large
4 vendor/google.golang.org/grpc/attributes/attributes.go (generated, vendored)

@@ -69,7 +69,9 @@ func (a *Attributes) Value(key interface{}) interface{} {
 // bool' is implemented for a value in the attributes, it is called to
 // determine if the value matches the one stored in the other attributes. If
 // Equal is not implemented, standard equality is used to determine if the two
-// values are equal.
+// values are equal. Note that some types (e.g. maps) aren't comparable by
+// default, so they must be wrapped in a struct, or in an alias type, with Equal
+// defined.
 func (a *Attributes) Equal(o *Attributes) bool {
 	if a == nil && o == nil {
 		return true
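The expanded comment warns that non-comparable values such as maps need a wrapper type with an Equal method. A sketch of such a wrapper, assuming the grpc attributes package from this update; the labelSet type and the key are made up for the example.

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// labelSet wraps a map so it can be stored in attributes.Attributes: maps are
// not comparable, so we provide the Equal method the package looks for.
type labelSet struct{ m map[string]string }

func (l *labelSet) Equal(o interface{}) bool {
	other, ok := o.(*labelSet)
	if !ok || len(l.m) != len(other.m) {
		return false
	}
	for k, v := range l.m {
		if other.m[k] != v {
			return false
		}
	}
	return true
}

type key struct{}

func main() {
	a := attributes.New(key{}, &labelSet{m: map[string]string{"zone": "a"}})
	b := attributes.New(key{}, &labelSet{m: map[string]string{"zone": "a"}})
	fmt.Println(a.Equal(b)) // true, compared via labelSet.Equal
}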
2 vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go (generated, vendored)

@@ -1,6 +1,6 @@
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
 // versions:
-// - protoc-gen-go-grpc v1.1.0
+// - protoc-gen-go-grpc v1.2.0
 // - protoc v3.14.0
 // source: grpc/lb/v1/load_balancer.proto

@@ -1,6 +1,6 @@
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
 // versions:
-// - protoc-gen-go-grpc v1.1.0
+// - protoc-gen-go-grpc v1.2.0
 // - protoc v3.14.0
 // source: grpc/gcp/handshaker.proto
5 vendor/google.golang.org/grpc/credentials/insecure/insecure.go (generated, vendored)

@@ -18,11 +18,6 @@

 // Package insecure provides an implementation of the
 // credentials.TransportCredentials interface which disables transport security.
-//
-// Experimental
-//
-// Notice: This package is EXPERIMENTAL and may be changed or removed in a
-// later release.
 package insecure

 import (
4 vendor/google.golang.org/grpc/dialoptions.go (generated, vendored)

@@ -272,7 +272,7 @@ func withBackoff(bs internalbackoff.Strategy) DialOption {
 	})
 }

-// WithBlock returns a DialOption which makes caller of Dial blocks until the
+// WithBlock returns a DialOption which makes callers of Dial block until the
 // underlying connection is up. Without this, Dial returns immediately and
 // connecting the server happens in background.
 func WithBlock() DialOption {

@@ -304,7 +304,7 @@ func WithReturnConnectionError() DialOption {
 // WithCredentialsBundle or WithPerRPCCredentials) which require transport
 // security is incompatible and will cause grpc.Dial() to fail.
 //
-// Deprecated: use insecure.NewCredentials() instead.
+// Deprecated: use WithTransportCredentials and insecure.NewCredentials() instead.
 // Will be supported throughout 1.x.
 func WithInsecure() DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
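The updated deprecation notice points at the explicit form. A sketch of the suggested replacement for grpc.WithInsecure(), with a placeholder address:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// Preferred replacement for the deprecated grpc.WithInsecure(): pass
	// insecure transport credentials explicitly.
	conn, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock(), // block until the connection is up or ctx expires
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
	log.Println("connected:", conn.Target())
}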
8 vendor/google.golang.org/grpc/grpclog/loggerv2.go (generated, vendored)

@@ -248,12 +248,12 @@ func (g *loggerT) V(l int) bool {
 // later release.
 type DepthLoggerV2 interface {
 	LoggerV2
-	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	InfoDepth(depth int, args ...interface{})
-	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	WarningDepth(depth int, args ...interface{})
-	// ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	ErrorDepth(depth int, args ...interface{})
-	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	FatalDepth(depth int, args ...interface{})
 }
7 vendor/google.golang.org/grpc/internal/envconfig/xds.go (generated, vendored)

@@ -42,6 +42,7 @@ const (
 	aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
 	rbacSupportEnv            = "GRPC_XDS_EXPERIMENTAL_RBAC"
 	federationEnv             = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
+	rlsInXDSEnv               = "GRPC_EXPERIMENTAL_XDS_RLS_LB"

 	c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI"
 )

@@ -85,6 +86,12 @@ var (
 	// XDSFederation indicates whether federation support is enabled.
 	XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")

+	// XDSRLS indicates whether processing of Cluster Specifier plugins and
+	// support for the RLS CLuster Specifier is enabled, which can be enabled by
+	// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
+	// "true".
+	XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true")
+
 	// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
 	C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv)
 )
8 vendor/google.golang.org/grpc/internal/grpclog/grpclog.go (generated, vendored)

@@ -115,12 +115,12 @@ type LoggerV2 interface {
 // Notice: This type is EXPERIMENTAL and may be changed or removed in a
 // later release.
 type DepthLoggerV2 interface {
-	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	InfoDepth(depth int, args ...interface{})
-	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	WarningDepth(depth int, args ...interface{})
-	// ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	ErrorDepth(depth int, args ...interface{})
-	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	FatalDepth(depth int, args ...interface{})
 }
11 vendor/google.golang.org/grpc/internal/grpcutil/regex.go (generated, vendored)

@@ -20,9 +20,12 @@ package grpcutil

 import "regexp"

-// FullMatchWithRegex returns whether the full string matches the regex provided.
-func FullMatchWithRegex(re *regexp.Regexp, string string) bool {
+// FullMatchWithRegex returns whether the full text matches the regex provided.
+func FullMatchWithRegex(re *regexp.Regexp, text string) bool {
+	if len(text) == 0 {
+		return re.MatchString(text)
+	}
 	re.Longest()
-	rem := re.FindString(string)
-	return len(rem) == len(string)
+	rem := re.FindString(text)
+	return len(rem) == len(text)
 }
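The helper now special-cases empty input before using leftmost-longest matching plus a length check to decide whether the whole text matches. A standalone sketch of the same approach using only the standard regexp package:

package main

import (
	"fmt"
	"regexp"
)

// fullMatch mirrors the helper above: leftmost-longest matching plus a length
// comparison answers "does the whole text match", and the empty-text case is
// handled up front because FindString cannot distinguish "no match" from an
// empty match.
func fullMatch(re *regexp.Regexp, text string) bool {
	if len(text) == 0 {
		return re.MatchString(text)
	}
	re.Longest()
	return len(re.FindString(text)) == len(text)
}

func main() {
	re := regexp.MustCompile("a+b*")
	fmt.Println(fullMatch(re, "aaab"))                    // true
	fmt.Println(fullMatch(re, "aaabc"))                   // false: match does not cover all of the text
	fmt.Println(fullMatch(regexp.MustCompile("x*"), "")) // true: empty text still matches
}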
18 vendor/google.golang.org/grpc/regenerate.sh (generated, vendored)

@@ -76,7 +76,21 @@ SOURCES=(
 # These options of the form 'Mfoo.proto=bar' instruct the codegen to use an
 # import path of 'bar' in the generated code when 'foo.proto' is imported in
 # one of the sources.
-OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core
+#
+# Note that the protos listed here are all for testing purposes. All protos to
+# be used externally should have a go_package option (and they don't need to be
+# listed here).
+OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\
+Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\
+Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing

 for src in ${SOURCES[@]}; do
   echo "protoc ${src}"

@@ -85,7 +99,6 @@ for src in ${SOURCES[@]}; do
     -I${WORKDIR}/grpc-proto \
     -I${WORKDIR}/googleapis \
     -I${WORKDIR}/protobuf/src \
-    -I${WORKDIR}/istio \
     ${src}
 done

@@ -96,7 +109,6 @@ for src in ${LEGACY_SOURCES[@]}; do
    -I${WORKDIR}/grpc-proto \
    -I${WORKDIR}/googleapis \
    -I${WORKDIR}/protobuf/src \
-   -I${WORKDIR}/istio \
    ${src}
 done
2 vendor/google.golang.org/grpc/version.go (generated, vendored)

@@ -19,4 +19,4 @@
 package grpc

 // Version is the current grpc version.
-const Version = "1.43.0"
+const Version = "1.44.1-dev"
12 vendor/modules.txt (vendored)

@@ -11,7 +11,7 @@ cloud.google.com/go/compute/metadata
 # cloud.google.com/go/iam v0.1.1
 ## explicit; go 1.11
 cloud.google.com/go/iam
-# cloud.google.com/go/storage v1.18.2
+# cloud.google.com/go/storage v1.19.0
 ## explicit; go 1.11
 cloud.google.com/go/storage
 cloud.google.com/go/storage/internal/apiv2

@@ -33,7 +33,7 @@ github.com/VictoriaMetrics/metricsql/binaryop
 # github.com/VividCortex/ewma v1.2.0
 ## explicit; go 1.12
 github.com/VividCortex/ewma
-# github.com/aws/aws-sdk-go v1.42.39
+# github.com/aws/aws-sdk-go v1.42.42
 ## explicit; go 1.11
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/arn

@@ -146,7 +146,7 @@ github.com/influxdata/influxdb/pkg/escape
 # github.com/jmespath/go-jmespath v0.4.0
 ## explicit; go 1.14
 github.com/jmespath/go-jmespath
-# github.com/klauspost/compress v1.14.1
+# github.com/klauspost/compress v1.14.2
 ## explicit; go 1.15
 github.com/klauspost/compress
 github.com/klauspost/compress/flate

@@ -264,7 +264,7 @@ go.opencensus.io/trace/tracestate
 go.uber.org/atomic
 # go.uber.org/goleak v1.1.11-0.20210813005559-691160354723
 ## explicit; go 1.13
-# golang.org/x/net v0.0.0-20220121210141-e204ce36a2ba
+# golang.org/x/net v0.0.0-20220127074510-2fabfed7e28f
 ## explicit; go 1.17
 golang.org/x/net/context
 golang.org/x/net/context/ctxhttp

@@ -337,7 +337,7 @@ google.golang.org/appengine/internal/socket
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/socket
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5
+# google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350
 ## explicit; go 1.11
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/iam/v1

@@ -347,7 +347,7 @@ google.golang.org/genproto/googleapis/rpc/status
 google.golang.org/genproto/googleapis/storage/v2
 google.golang.org/genproto/googleapis/type/date
 google.golang.org/genproto/googleapis/type/expr
-# google.golang.org/grpc v1.43.0
+# google.golang.org/grpc v1.44.0
 ## explicit; go 1.14
 google.golang.org/grpc
 google.golang.org/grpc/attributes