Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git, synced 2024-11-21 14:44:00 +00:00
vendor: make vendor-update

parent 7ac3f19ee1
commit 6b54ad8cea

653 changed files with 58481 additions and 29808 deletions
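
The commit message refers to the repository's vendor-update make target. The Makefile itself is not part of this diff, so the commands below are only a rough sketch of the standard Go module workflow such a target usually wraps; the exact invocation in VictoriaMetrics may differ:

    # hypothetical equivalent of `make vendor-update`
    go get -u ./...     # bump direct and indirect dependencies in go.mod and go.sum
    go mod tidy         # drop requirements that are no longer referenced
    go mod vendor       # regenerate the vendor/ tree from the updated go.mod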

go.mod (74 lines changed)

@@ -3,15 +3,15 @@ module github.com/VictoriaMetrics/VictoriaMetrics

 go 1.17

 require (
-	cloud.google.com/go/storage v1.23.0
-	github.com/VictoriaMetrics/fastcache v1.10.0
+	cloud.google.com/go/storage v1.28.0
+	github.com/VictoriaMetrics/fastcache v1.12.0

 	// Do not use the original github.com/valyala/fasthttp because of issues
 	// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
 	github.com/VictoriaMetrics/fasthttp v1.1.0
-	github.com/VictoriaMetrics/metrics v1.18.1
-	github.com/VictoriaMetrics/metricsql v0.44.1
-	github.com/aws/aws-sdk-go v1.44.53
+	github.com/VictoriaMetrics/metrics v1.23.0
+	github.com/VictoriaMetrics/metricsql v0.45.0
+	github.com/aws/aws-sdk-go v1.44.134
 	github.com/cespare/xxhash/v2 v2.1.2

 	// TODO: switch back to https://github.com/cheggaaa/pb/v3 when v3-pooling branch
@@ -19,26 +19,27 @@ require (
 	// See https://github.com/cheggaaa/pb/pull/192#issuecomment-1121285954 for details.
 	github.com/dmitryk-dk/pb/v3 v3.0.9
 	github.com/golang/snappy v0.0.4
-	github.com/influxdata/influxdb v1.9.8
-	github.com/klauspost/compress v1.15.8
+	github.com/influxdata/influxdb v1.10.0
+	github.com/klauspost/compress v1.15.12
 	github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
-	github.com/urfave/cli/v2 v2.11.0
+	github.com/urfave/cli/v2 v2.23.5
 	github.com/valyala/fastjson v1.6.3
 	github.com/valyala/fastrand v1.1.0
-	github.com/valyala/fasttemplate v1.2.1
+	github.com/valyala/fasttemplate v1.2.2
 	github.com/valyala/gozstd v1.17.0
 	github.com/valyala/quicktemplate v1.7.0
-	golang.org/x/net v0.0.0-20220708220712-1185a9018129
-	golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0
-	golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e
-	google.golang.org/api v0.87.0
+	golang.org/x/net v0.2.0
+	golang.org/x/oauth2 v0.2.0
+	golang.org/x/sys v0.2.0
+	google.golang.org/api v0.103.0
 	gopkg.in/yaml.v2 v2.4.0
 )

 require (
-	cloud.google.com/go v0.103.0 // indirect
-	cloud.google.com/go/compute v1.7.0 // indirect
-	cloud.google.com/go/iam v0.3.0 // indirect
+	cloud.google.com/go v0.106.0 // indirect
+	cloud.google.com/go/compute v1.12.1 // indirect
+	cloud.google.com/go/compute/metadata v0.2.1 // indirect
+	cloud.google.com/go/iam v0.7.0 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
@@ -48,35 +49,34 @@ require (
 	github.com/go-logfmt/logfmt v0.5.1 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/google/go-cmp v0.5.8 // indirect
+	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/google/uuid v1.3.0 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
-	github.com/googleapis/gax-go/v2 v2.4.0 // indirect
-	github.com/googleapis/go-type-adapters v1.0.0 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect
+	github.com/googleapis/gax-go/v2 v2.7.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
-	github.com/mattn/go-colorable v0.1.12 // indirect
-	github.com/mattn/go-isatty v0.0.14 // indirect
-	github.com/mattn/go-runewidth v0.0.13 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-isatty v0.0.16 // indirect
+	github.com/mattn/go-runewidth v0.0.14 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/prometheus/client_golang v1.12.2 // indirect
-	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.36.0 // indirect
-	github.com/prometheus/procfs v0.7.3 // indirect
-	github.com/rivo/uniseg v0.2.0 // indirect
+	github.com/prometheus/client_golang v1.14.0 // indirect
+	github.com/prometheus/client_model v0.3.0 // indirect
+	github.com/prometheus/common v0.37.0 // indirect
+	github.com/prometheus/procfs v0.8.0 // indirect
+	github.com/rivo/uniseg v0.4.2 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/valyala/bytebufferpool v1.0.0 // indirect
 	github.com/valyala/histogram v1.2.0 // indirect
 	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
-	go.opencensus.io v0.23.0 // indirect
-	go.uber.org/atomic v1.9.0 // indirect
+	go.opencensus.io v0.24.0 // indirect
+	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 // indirect
-	golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
-	golang.org/x/text v0.3.7 // indirect
-	golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
+	golang.org/x/sync v0.1.0 // indirect
+	golang.org/x/text v0.4.0 // indirect
+	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d // indirect
-	google.golang.org/grpc v1.48.0 // indirect
-	google.golang.org/protobuf v1.28.0 // indirect
+	google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66 // indirect
+	google.golang.org/grpc v1.50.1 // indirect
+	google.golang.org/protobuf v1.28.1 // indirect
 )
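
For anyone reviewing a bump like this, the resulting versions can be cross-checked against the module graph with the standard go tooling; a small illustrative example (module path picked from the diff above):

    go list -m github.com/VictoriaMetrics/metricsql    # should report v0.45.0 after this commit
    go mod verify                                       # confirms cached modules match the go.sum hashes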

go.sum (374 lines changed)

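Each entry in the go.sum hunks below pairs a module version with an h1: checksum: one line covers the module's full source tree and a second line, suffixed /go.mod, covers only its go.mod file. As an illustration, the updated storage dependency from this commit appears as:

    cloud.google.com/go/storage v1.28.0 h1:DLrIZ6xkeZX6K70fU/boWx5INJumt6f+nwwWSHXzzGY=
    cloud.google.com/go/storage v1.28.0/go.mod h1:qlgZML35PXA3zoEnIkiPLY4/TOkUleufRlu6qmcf7sI=
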
@ -15,24 +15,8 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
|
|||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
|
||||
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
|
||||
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
|
||||
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
|
||||
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
|
||||
cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
|
||||
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
|
||||
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
|
||||
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
|
||||
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
|
||||
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
|
||||
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
|
||||
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
|
||||
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
|
||||
cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
|
||||
cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
|
||||
cloud.google.com/go v0.103.0 h1:YXtxp9ymmZjlGzxV7VrYQ8aaQuAgcqxSy6YhDX4I458=
|
||||
cloud.google.com/go v0.103.0/go.mod h1:vwLx1nqLrzLX/fpwSMOXmFIqBOyHsvHbnAdbGSJ+mKk=
|
||||
cloud.google.com/go v0.106.0 h1:AWaMWuZb2oFeiV91OfNHZbmwUhMVuXEaLPm9sqDAOl8=
|
||||
cloud.google.com/go v0.106.0/go.mod h1:5NEGxGuIeMQiPaWLwLYZ7kfNWiP6w1+QJK+xqyIT+dw=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
|
@ -40,17 +24,15 @@ cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUM
|
|||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o=
|
||||
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
|
||||
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
|
||||
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
|
||||
cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
|
||||
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
|
||||
cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk=
|
||||
cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
|
||||
cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0=
|
||||
cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
|
||||
cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48=
|
||||
cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc=
|
||||
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
|
||||
cloud.google.com/go/iam v0.7.0 h1:k4MuwOsS7zGJJ+QfZ5vBK8SgHBAvYN/23BWsiihJ1vs=
|
||||
cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg=
|
||||
cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
|
@ -60,9 +42,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
|
|||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
|
||||
cloud.google.com/go/storage v1.23.0 h1:wWRIaDURQA8xxHguFCshYepGlrWIrbBnAmc7wfg07qY=
|
||||
cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
|
||||
cloud.google.com/go/storage v1.28.0 h1:DLrIZ6xkeZX6K70fU/boWx5INJumt6f+nwwWSHXzzGY=
|
||||
cloud.google.com/go/storage v1.28.0/go.mod h1:qlgZML35PXA3zoEnIkiPLY4/TOkUleufRlu6qmcf7sI=
|
||||
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-sdk-for-go v48.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
|
@ -103,14 +84,15 @@ github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdko
|
|||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/VictoriaMetrics/fastcache v1.10.0 h1:5hDJnLsKLpnUEToub7ETuRu8RCkb40woBZAUiKonXzY=
|
||||
github.com/VictoriaMetrics/fastcache v1.10.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.0 h1:vnVi/y9yKDcD9akmc4NqAoqgQhJrOwUF+j9LTgn4QDE=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8=
|
||||
github.com/VictoriaMetrics/fasthttp v1.1.0 h1:3crd4YWHsMwu60GUXRH6OstowiFvqrwS4a/ueoLdLL0=
|
||||
github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR2uydjiWvoLp5ZTqQ=
|
||||
github.com/VictoriaMetrics/metrics v1.18.1 h1:OZ0+kTTto8oPfHnVAnTOoyl0XlRhRkoQrD2n2cOuRw0=
|
||||
github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
|
||||
github.com/VictoriaMetrics/metricsql v0.44.1 h1:qGoRt0g84uMUscVjS7P3uDZKmjJubWKaIx9v0iHKgck=
|
||||
github.com/VictoriaMetrics/metricsql v0.44.1/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
|
||||
github.com/VictoriaMetrics/metrics v1.23.0 h1:WzfqyzCaxUZip+OBbg1+lV33WChDSu4ssYII3nxtpeA=
|
||||
github.com/VictoriaMetrics/metrics v1.23.0/go.mod h1:rAr/llLpEnAdTehiNlUxKgnjcOuROSzpw0GvjpEbvFc=
|
||||
github.com/VictoriaMetrics/metricsql v0.45.0 h1:kVQHnkDJm4qyJ8f5msTclmwqAtlUdPbbEJ7zoa/FTNs=
|
||||
github.com/VictoriaMetrics/metricsql v0.45.0/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
|
||||
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
|
||||
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
|
||||
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
|
||||
|
@ -146,8 +128,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ
|
|||
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
|
||||
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.44.53 h1:2MErE8gRyBLuE1fuH2Sqlj1xoN3S6/jXb0aO/A1jGfk=
|
||||
github.com/aws/aws-sdk-go v1.44.53/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
||||
github.com/aws/aws-sdk-go v1.44.134 h1:TzFxjVHPPsibtkD7y6KHI4V00rEKg4yzNlMNGy2ZHeg=
|
||||
github.com/aws/aws-sdk-go v1.44.134/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
|
@ -173,14 +155,6 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
|
|||
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
|
||||
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
|
@ -221,12 +195,6 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s
|
|||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
|
@ -388,8 +356,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
|
|||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
|
||||
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
|
@ -405,7 +371,6 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
|
|||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
|
@ -428,19 +393,15 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=
|
||||
github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
|
@ -448,34 +409,20 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
|
|||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201117184057-ae444373da19/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
|
||||
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
|
||||
github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
|
||||
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
|
||||
github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk=
|
||||
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
|
||||
github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=
|
||||
github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
|
||||
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
|
||||
github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA=
|
||||
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
|
||||
github.com/gophercloud/gophercloud v0.14.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
|
@ -530,8 +477,8 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
|
|||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
|
||||
github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
|
||||
github.com/influxdata/influxdb v1.9.8 h1:wuw8ZwyIZgg/jn//9cwr4OpKIF5z9o83lIfpb19aO2Q=
|
||||
github.com/influxdata/influxdb v1.9.8/go.mod h1:8Ft9mikW2GELpV154RV+F7ocPa5FS5G/rl4rH9INT/I=
|
||||
github.com/influxdata/influxdb v1.10.0 h1:8xDpt8KO3lzrzf/ss+l8r42AGUZvoITu5824berK7SE=
|
||||
github.com/influxdata/influxdb v1.10.0/go.mod h1:IVPuoA2pOOxau/NguX7ipW0Jp9Bn+dMWlo0+VOscevU=
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||
github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
|
||||
github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
|
||||
|
@ -573,8 +520,8 @@ github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
|
|||
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/compress v1.15.8 h1:JahtItbkWjf2jzm/T+qgMxkP9EMHsqEUA6vCMGmXvhA=
|
||||
github.com/klauspost/compress v1.15.8/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
|
||||
github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
|
||||
github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
|
||||
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
|
||||
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
|
@ -609,24 +556,26 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
|
|||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
|
||||
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
|
||||
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
|
||||
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
|
||||
github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
|
||||
|
@ -724,15 +673,16 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP
|
|||
github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
|
||||
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
|
||||
|
@ -743,8 +693,8 @@ github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16
|
|||
github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.36.0 h1:78hJTing+BLYLjhXE+Z2BubeEymH5Lr0/Mt8FKkxxYo=
|
||||
github.com/prometheus/common v0.36.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
|
||||
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
|
@ -753,14 +703,16 @@ github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4
|
|||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9 h1:F2A86PGVYqn3P7oWbrSmSlJHae9y6wwpAdoWb/pZi6Q=
|
||||
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9/go.mod h1:1MDE/bXgu4gqd5w/otko6WQpXZX9vu8QX4KbitCmaPg=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
|
||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8=
|
||||
github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
|
@ -806,14 +758,19 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J
|
|||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
|
@ -822,8 +779,8 @@ github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW
|
|||
github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/cli/v2 v2.11.0 h1:c6bD90aLd2iEsokxhxkY5Er0zA2V9fId2aJfwmrF+do=
|
||||
github.com/urfave/cli/v2 v2.11.0/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhAyPlwvo=
|
||||
github.com/urfave/cli/v2 v2.23.5 h1:xbrU7tAYviSpqeR3X4nEFWUdB/uDZ6DE+HxmRU7Xtyw=
|
||||
github.com/urfave/cli/v2 v2.23.5/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
|
||||
|
@ -831,8 +788,8 @@ github.com/valyala/fastjson v1.6.3 h1:tAKFnnwmeMGPbwJ7IwxcTPCNr3uIzoIj3/Fh90ra4x
|
|||
github.com/valyala/fastjson v1.6.3/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
|
||||
github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G8=
|
||||
github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=
|
||||
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
|
||||
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
|
||||
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||
github.com/valyala/gozstd v1.17.0 h1:M4Ds4MIrw+pD+s6vYtuFZ8D3iEw9htzfdytOV3C3iQU=
|
||||
github.com/valyala/gozstd v1.17.0/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
|
||||
github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ=
|
||||
|
@ -854,6 +811,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
|
|||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
||||
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
|
@ -869,15 +827,13 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
|||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
|
||||
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
|
||||
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4=
|
||||
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
|
@ -905,6 +861,7 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh
|
|||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
|
@ -931,8 +888,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
|
|||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
|
@ -941,9 +896,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
|
|||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
@ -984,26 +938,17 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
|
|||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220708220712-1185a9018129 h1:vucSRfWwTsoXro7P+3Cjlr6flUMtzCwzlvkxEQtHHB0=
|
||||
golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
|
||||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -1011,22 +956,10 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr
|
|||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
|
||||
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
|
||||
golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 h1:VnGaRqoLmqZH/3TMLJwYCEWkR4j1nuIU1U9TvbqsDUw=
|
||||
golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
|
||||
golang.org/x/oauth2 v0.2.0 h1:GtQkldQ9m7yvzCL1V+LrYow3Khe0eJH0w7RbX/VbaIU=
|
||||
golang.org/x/oauth2 v0.2.0/go.mod h1:Cwn6afJ8jrQwYMxQDTpISoXmXW9I6qF6vDeuuoX3Ibs=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -1039,8 +972,9 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8=
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -1100,57 +1034,36 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e h1:NHvCuwuS43lGnYhten69ZWqi2QOj/CiDNcKbVqwVoew=
|
||||
golang.org/x/sys v0.0.0-20220712014510-0a85c31ab51e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
@@ -1219,25 +1132,15 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
|
||||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201119054027-25dc3e1ccc3c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0=
|
||||
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
|
||||
|
@@ -1262,32 +1165,8 @@ google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
|
||||
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
|
||||
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
|
||||
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
|
||||
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
|
||||
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
|
||||
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
|
||||
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
|
||||
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
|
||||
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
|
||||
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
|
||||
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
|
||||
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
|
||||
google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
|
||||
google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
|
||||
google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
|
||||
google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
|
||||
google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
|
||||
google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
|
||||
google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
|
||||
google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
|
||||
google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
|
||||
google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
|
||||
google.golang.org/api v0.87.0 h1:pUQVF/F+X7Tl1lo4LJoJf5BOpjtmINU80p9XpYTU2p4=
|
||||
google.golang.org/api v0.87.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
|
||||
google.golang.org/api v0.103.0 h1:9yuVqlu2JCvcLg9p8S3fcFLZij8EPSyvODIY1rkMizQ=
|
||||
google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
@@ -1331,58 +1210,8 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
|
||||
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
|
||||
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
|
||||
google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
|
||||
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
|
||||
google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
|
||||
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
|
||||
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
|
||||
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
||||
google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
||||
google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
||||
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
||||
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
|
||||
google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d h1:YbuF5+kdiC516xIP60RvlHeFbY9sRDR73QsAGHpkeVw=
|
||||
google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66 h1:wx7sJ5GRBQLRcslTNcrTklsHhHevQvxgztW18txbbZM=
|
||||
google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
|
@@ -1403,25 +1232,8 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
|
||||
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
|
||||
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w=
|
||||
google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY=
|
||||
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
@@ -1434,9 +1246,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1466,6 +1277,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
12 vendor/cloud.google.com/go/.gitignore generated vendored
@@ -1,12 +0,0 @@
# Editors
.idea
.vscode
*.swp
.history

# Test files
*.test
coverage.txt

# Other
.DS_Store
107 vendor/cloud.google.com/go/.release-please-manifest-submodules.json generated vendored
|
@ -1,107 +0,0 @@
|
|||
{
|
||||
"accessapproval": "1.3.0",
|
||||
"accesscontextmanager": "1.2.0",
|
||||
"aiplatform": "1.14.0",
|
||||
"analytics": "0.8.0",
|
||||
"apigateway": "1.2.0",
|
||||
"apigeeconnect": "1.2.0",
|
||||
"appengine": "1.3.0",
|
||||
"area120": "0.4.0",
|
||||
"artifactregistry": "1.3.0",
|
||||
"asset": "1.3.0",
|
||||
"assuredworkloads": "1.0.0",
|
||||
"automl": "1.4.0",
|
||||
"baremetalsolution": "0.2.0",
|
||||
"batch": "0.1.0",
|
||||
"billing": "1.2.0",
|
||||
"binaryauthorization": "1.0.0",
|
||||
"certificatemanager": "0.2.0",
|
||||
"channel": "1.7.0",
|
||||
"cloudbuild": "1.2.0",
|
||||
"clouddms": "1.2.0",
|
||||
"cloudtasks": "1.4.0",
|
||||
"compute": "1.7.0",
|
||||
"contactcenterinsights": "1.2.0",
|
||||
"container": "1.2.0",
|
||||
"containeranalysis": "0.4.0",
|
||||
"datacatalog": "1.3.0",
|
||||
"dataflow": "0.5.0",
|
||||
"datafusion": "1.3.0",
|
||||
"datalabeling": "0.3.0",
|
||||
"dataplex": "1.0.0",
|
||||
"dataproc": "1.5.0",
|
||||
"dataqna": "0.4.0",
|
||||
"datastream": "1.0.0",
|
||||
"deploy": "1.2.0",
|
||||
"dialogflow": "1.11.0",
|
||||
"dlp": "1.4.0",
|
||||
"documentai": "1.4.0",
|
||||
"domains": "0.5.0",
|
||||
"essentialcontacts": "1.2.0",
|
||||
"eventarc": "1.6.0",
|
||||
"filestore": "1.2.0",
|
||||
"functions": "1.4.0",
|
||||
"gaming": "1.3.0",
|
||||
"gkebackup": "0.1.0",
|
||||
"gkeconnect": "0.3.0",
|
||||
"gkehub": "0.8.0",
|
||||
"gkemulticloud": "0.2.0",
|
||||
"grafeas": "0.2.0",
|
||||
"gsuiteaddons": "1.2.0",
|
||||
"iam": "0.3.0",
|
||||
"iap": "1.3.0",
|
||||
"ids": "1.0.0",
|
||||
"iot": "1.2.0",
|
||||
"kms": "1.4.0",
|
||||
"language": "1.3.0",
|
||||
"lifesciences": "0.4.0",
|
||||
"managedidentities": "1.2.0",
|
||||
"mediatranslation": "0.3.0",
|
||||
"memcache": "1.3.0",
|
||||
"metastore": "1.3.0",
|
||||
"monitoring": "1.5.0",
|
||||
"networkconnectivity": "1.2.0",
|
||||
"networkmanagement": "1.3.0",
|
||||
"networksecurity": "0.3.0",
|
||||
"notebooks": "1.0.0",
|
||||
"optimization": "1.0.0",
|
||||
"orchestration": "1.2.0",
|
||||
"orgpolicy": "1.3.0",
|
||||
"osconfig": "1.6.0",
|
||||
"oslogin": "1.3.0",
|
||||
"phishingprotection": "0.4.0",
|
||||
"policytroubleshooter": "1.2.0",
|
||||
"privatecatalog": "0.4.0",
|
||||
"recaptchaenterprise/v2": "2.0.1",
|
||||
"recommendationengine": "0.2.0",
|
||||
"recommender": "1.4.0",
|
||||
"redis": "1.6.0",
|
||||
"resourcemanager": "1.2.0",
|
||||
"resourcesettings": "1.2.0",
|
||||
"retail": "1.4.0",
|
||||
"run": "0.1.1",
|
||||
"scheduler": "1.3.0",
|
||||
"secretmanager": "1.5.0",
|
||||
"security": "1.4.0",
|
||||
"securitycenter": "1.8.0",
|
||||
"servicecontrol": "1.3.0",
|
||||
"servicedirectory": "1.3.0",
|
||||
"servicemanagement": "1.3.0",
|
||||
"serviceusage": "1.2.0",
|
||||
"shell": "1.2.0",
|
||||
"speech": "1.5.0",
|
||||
"storagetransfer": "1.3.0",
|
||||
"talent": "0.9.0",
|
||||
"texttospeech": "1.3.0",
|
||||
"tpu": "1.2.0",
|
||||
"trace": "1.2.0",
|
||||
"translate": "1.2.0",
|
||||
"video": "1.7.0",
|
||||
"videointelligence": "1.4.0",
|
||||
"vision/v2": "2.0.0",
|
||||
"vmmigration": "1.0.0",
|
||||
"vpcaccess": "1.2.0",
|
||||
"webrisk": "1.3.0",
|
||||
"websecurityscanner": "1.2.0",
|
||||
"workflows": "1.5.0"
|
||||
}
|
3 vendor/cloud.google.com/go/.release-please-manifest.json generated vendored
@@ -1,3 +0,0 @@
{
  ".": "0.103.0"
}
2417 vendor/cloud.google.com/go/CHANGES.md generated vendored
File diff suppressed because it is too large
44 vendor/cloud.google.com/go/CODE_OF_CONDUCT.md generated vendored
|
@ -1,44 +0,0 @@
|
|||
# Contributor Code of Conduct
|
||||
|
||||
As contributors and maintainers of this project,
|
||||
and in the interest of fostering an open and welcoming community,
|
||||
we pledge to respect all people who contribute through reporting issues,
|
||||
posting feature requests, updating documentation,
|
||||
submitting pull requests or patches, and other activities.
|
||||
|
||||
We are committed to making participation in this project
|
||||
a harassment-free experience for everyone,
|
||||
regardless of level of experience, gender, gender identity and expression,
|
||||
sexual orientation, disability, personal appearance,
|
||||
body size, race, ethnicity, age, religion, or nationality.
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery
|
||||
* Personal attacks
|
||||
* Trolling or insulting/derogatory comments
|
||||
* Public or private harassment
|
||||
* Publishing other's private information,
|
||||
such as physical or electronic
|
||||
addresses, without explicit permission
|
||||
* Other unethical or unprofessional conduct.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct.
|
||||
By adopting this Code of Conduct,
|
||||
project maintainers commit themselves to fairly and consistently
|
||||
applying these principles to every aspect of managing this project.
|
||||
Project maintainers who do not follow or enforce the Code of Conduct
|
||||
may be permanently removed from the project team.
|
||||
|
||||
This code of conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community.
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior
|
||||
may be reported by opening an issue
|
||||
or contacting one or more of the project maintainers.
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
|
||||
available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
|
||||
|
327 vendor/cloud.google.com/go/CONTRIBUTING.md generated vendored
|
@ -1,327 +0,0 @@
|
|||
# Contributing
|
||||
|
||||
1. [File an issue](https://github.com/googleapis/google-cloud-go/issues/new/choose).
|
||||
The issue will be used to discuss the bug or feature and should be created
|
||||
before sending a PR.
|
||||
|
||||
1. [Install Go](https://golang.org/dl/).
|
||||
1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`)
|
||||
is in your `PATH`.
|
||||
1. Check it's working by running `go version`.
|
||||
* If it doesn't work, check the install location, usually
|
||||
`/usr/local/go`, is on your `PATH`.
|
||||
|
||||
1. Sign one of the
|
||||
[contributor license agreements](#contributor-license-agreements) below.
|
||||
|
||||
1. Clone the repo:
|
||||
`git clone https://github.com/googleapis/google-cloud-go`
|
||||
|
||||
1. Change into the checked out source:
|
||||
`cd google-cloud-go`
|
||||
|
||||
1. Fork the repo.
|
||||
|
||||
1. Set your fork as a remote:
|
||||
`git remote add fork git@github.com:GITHUB_USERNAME/google-cloud-go.git`
|
||||
|
||||
1. Make changes, commit to your fork.
|
||||
|
||||
Commit messages should follow the
|
||||
[Conventional Commits Style](https://www.conventionalcommits.org). The scope
|
||||
portion should always be filled with the name of the package affected by the
|
||||
changes being made. For example:
|
||||
```
|
||||
feat(functions): add gophers codelab
|
||||
```
|
||||
|
||||
1. Send a pull request with your changes.
|
||||
|
||||
To minimize friction, consider setting `Allow edits from maintainers` on the
|
||||
PR, which will enable project committers and automation to update your PR.
|
||||
|
||||
1. A maintainer will review the pull request and make comments.
|
||||
|
||||
Prefer adding additional commits over amending and force-pushing since it can
|
||||
be difficult to follow code reviews when the commit history changes.
|
||||
|
||||
Commits will be squashed when they're merged.
|
||||
|
||||
## Testing
|
||||
|
||||
We test code against two versions of Go, the minimum and maximum versions
|
||||
supported by our clients. To see which versions these are checkout our
|
||||
[README](README.md#supported-versions).
|
||||
|
||||
### Integration Tests
|
||||
|
||||
In addition to the unit tests, you may run the integration test suite. These
|
||||
directions describe setting up your environment to run integration tests for
|
||||
_all_ packages: note that many of these instructions may be redundant if you
|
||||
intend only to run integration tests on a single package.
|
||||
|
||||
#### GCP Setup
|
||||
|
||||
To run the integrations tests, creation and configuration of two projects in
|
||||
the Google Developers Console is required: one specifically for Firestore
|
||||
integration tests, and another for all other integration tests. We'll refer to
|
||||
these projects as "general project" and "Firestore project".
|
||||
|
||||
After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount)
|
||||
for each project. Ensure the project-level **Owner**
|
||||
[IAM role](https://console.cloud.google.com/iam-admin/iam/project) role is added to
|
||||
each service account. During the creation of the service account, you should
|
||||
download the JSON credential file for use later.
|
||||
|
||||
Next, ensure the following APIs are enabled in the general project:
|
||||
|
||||
- BigQuery API
|
||||
- BigQuery Data Transfer API
|
||||
- Cloud Dataproc API
|
||||
- Cloud Dataproc Control API Private
|
||||
- Cloud Datastore API
|
||||
- Cloud Firestore API
|
||||
- Cloud Key Management Service (KMS) API
|
||||
- Cloud Natural Language API
|
||||
- Cloud OS Login API
|
||||
- Cloud Pub/Sub API
|
||||
- Cloud Resource Manager API
|
||||
- Cloud Spanner API
|
||||
- Cloud Speech API
|
||||
- Cloud Translation API
|
||||
- Cloud Video Intelligence API
|
||||
- Cloud Vision API
|
||||
- Compute Engine API
|
||||
- Compute Engine Instance Group Manager API
|
||||
- Container Registry API
|
||||
- Firebase Rules API
|
||||
- Google Cloud APIs
|
||||
- Google Cloud Deployment Manager V2 API
|
||||
- Google Cloud SQL
|
||||
- Google Cloud Storage
|
||||
- Google Cloud Storage JSON API
|
||||
- Google Compute Engine Instance Group Updater API
|
||||
- Google Compute Engine Instance Groups API
|
||||
- Kubernetes Engine API
|
||||
- Cloud Error Reporting API
|
||||
- Pub/Sub Lite API
|
||||
|
||||
Next, create a Datastore database in the general project, and a Firestore
|
||||
database in the Firestore project.
|
||||
|
||||
Finally, in the general project, create an API key for the translate API:
|
||||
|
||||
- Go to GCP Developer Console.
|
||||
- Navigate to APIs & Services > Credentials.
|
||||
- Click Create Credentials > API Key.
|
||||
- Save this key for use in `GCLOUD_TESTS_API_KEY` as described below.
|
||||
|
||||
#### Local Setup
|
||||
|
||||
Once the two projects are created and configured, set the following environment
|
||||
variables:
|
||||
|
||||
- `GCLOUD_TESTS_GOLANG_PROJECT_ID`: Developers Console project's ID (e.g.
|
||||
bamboo-shift-455) for the general project.
|
||||
- `GCLOUD_TESTS_GOLANG_KEY`: The path to the JSON key file of the general
|
||||
project's service account.
|
||||
- `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID
|
||||
(e.g. doorway-cliff-677) for the Firestore project.
|
||||
- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the
|
||||
Firestore project's service account.
|
||||
- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API created above.
|
||||
|
||||
As part of the setup that follows, the following variables will be configured:
|
||||
|
||||
- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests,
|
||||
in the form
|
||||
"projects/P/locations/L/keyRings/R". The creation of this is described below.
|
||||
- `GCLOUD_TESTS_BIGTABLE_KEYRING`: The full name of the keyring for the bigtable tests,
|
||||
in the form
|
||||
"projects/P/locations/L/keyRings/R". The creation of this is described below. Expected to be single region.
|
||||
- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone.
|
||||
|
||||
Install the [gcloud command-line tool][gcloudcli] to your machine and use it to
|
||||
create some resources used in integration tests.
|
||||
|
||||
From the project's root directory:
|
||||
|
||||
``` sh
|
||||
# Sets the default project in your env.
|
||||
$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
|
||||
|
||||
# Authenticates the gcloud tool with your account.
|
||||
$ gcloud auth login
|
||||
|
||||
# Create the indexes used in the datastore integration tests.
|
||||
$ gcloud datastore indexes create datastore/testdata/index.yaml
|
||||
|
||||
# Creates a Google Cloud storage bucket with the same name as your test project,
|
||||
# and with the Cloud Logging service account as owner, for the sink
|
||||
# integration tests in logging.
|
||||
$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
|
||||
$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
|
||||
|
||||
# Creates a PubSub topic for integration tests of storage notifications.
|
||||
$ gcloud beta pubsub topics create go-storage-notification-test
|
||||
# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user
|
||||
# "service-<numeric project id>@gs-project-accounts.iam.gserviceaccount.com"
|
||||
# as a publisher to that topic.
|
||||
|
||||
# Creates a Spanner instance for the spanner integration tests.
|
||||
$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 10 --description 'Instance for go client test'
|
||||
# NOTE: Spanner instances are priced by the node-hour, so you may want to
|
||||
# delete the instance after testing with 'gcloud beta spanner instances delete'.
|
||||
|
||||
$ export MY_KEYRING=some-keyring-name
|
||||
$ export MY_LOCATION=global
|
||||
$ export MY_SINGLE_LOCATION=us-central1
|
||||
# Creates a KMS keyring, in the same location as the default location for your
|
||||
# project's buckets.
|
||||
$ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION
|
||||
# Creates two keys in the keyring, named key1 and key2.
|
||||
$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
|
||||
$ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
|
||||
# Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable.
|
||||
$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
|
||||
# Authorizes Google Cloud Storage to encrypt and decrypt using key1.
|
||||
$ gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
|
||||
|
||||
# Create KMS Key in one region for Bigtable
|
||||
$ gcloud kms keyrings create $MY_KEYRING --location $MY_SINGLE_LOCATION
|
||||
$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_SINGLE_LOCATION --purpose encryption
|
||||
# Sets the GCLOUD_TESTS_BIGTABLE_KEYRING environment variable.
|
||||
$ export GCLOUD_TESTS_BIGTABLE_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_SINGLE_LOCATION/keyRings/$MY_KEYRING
|
||||
# Create a service agent, https://cloud.google.com/bigtable/docs/use-cmek#gcloud:
|
||||
$ gcloud beta services identity create \
|
||||
--service=bigtableadmin.googleapis.com \
|
||||
--project $GCLOUD_TESTS_GOLANG_PROJECT_ID
|
||||
# Note the service agent email for the agent created.
|
||||
$ export SERVICE_AGENT_EMAIL=<service agent email, from last step>
|
||||
|
||||
# Authorizes Google Cloud Bigtable to encrypt and decrypt using key1
|
||||
$ gcloud kms keys add-iam-policy-binding key1 \
|
||||
--keyring $MY_KEYRING \
|
||||
--location $MY_SINGLE_LOCATION \
|
||||
--role roles/cloudkms.cryptoKeyEncrypterDecrypter \
|
||||
--member "serviceAccount:$SERVICE_AGENT_EMAIL" \
|
||||
--project $GCLOUD_TESTS_GOLANG_PROJECT_ID
|
||||
```
|
||||
|
||||
It may be useful to add exports to your shell initialization for future use.
|
||||
For instance, in `.zshrc`:
|
||||
|
||||
```sh
|
||||
#### START GO SDK Test Variables
|
||||
# Developers Console project's ID (e.g. bamboo-shift-455) for the general project.
|
||||
export GCLOUD_TESTS_GOLANG_PROJECT_ID=your-project
|
||||
|
||||
# The path to the JSON key file of the general project's service account.
|
||||
export GCLOUD_TESTS_GOLANG_KEY=~/directory/your-project-abcd1234.json
|
||||
|
||||
# Developers Console project's ID (e.g. doorway-cliff-677) for the Firestore project.
|
||||
export GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID=your-firestore-project
|
||||
|
||||
# The path to the JSON key file of the Firestore project's service account.
|
||||
export GCLOUD_TESTS_GOLANG_FIRESTORE_KEY=~/directory/your-firestore-project-abcd1234.json
|
||||
|
||||
# The full name of the keyring for the tests, in the form "projects/P/locations/L/keyRings/R".
|
||||
# The creation of this is described below.
|
||||
export MY_KEYRING=my-golang-sdk-test
|
||||
export MY_LOCATION=global
|
||||
export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
|
||||
|
||||
# API key for using the Translate API.
|
||||
export GCLOUD_TESTS_API_KEY=abcdefghijk123456789
|
||||
|
||||
# Compute Engine zone. (https://cloud.google.com/compute/docs/regions-zones)
|
||||
export GCLOUD_TESTS_GOLANG_ZONE=your-chosen-region
|
||||
#### END GO SDK Test Variables
|
||||
```
|
||||
|
||||
#### Running
|
||||
|
||||
Once you've done the necessary setup, you can run the integration tests by
|
||||
running:
|
||||
|
||||
``` sh
|
||||
$ go test -v ./...
|
||||
```
|
||||
|
||||
Note that the above command will not run the tests in other modules. To run
|
||||
tests on other modules, first navigate to the appropriate
|
||||
subdirectory. For instance, to run only the tests for datastore:
|
||||
``` sh
|
||||
$ cd datastore
|
||||
$ go test -v ./...
|
||||
```
|
||||
|
||||
#### Replay

Some packages can record the RPCs during integration tests to a file for
subsequent replay. To record, pass the `-record` flag to `go test`. The
recording will be saved to the _package_`.replay` file. To replay integration
tests from a saved recording, the replay file must be present, the `-short`
flag must be passed to `go test`, and the `GCLOUD_TESTS_GOLANG_ENABLE_REPLAY`
environment variable must have a non-empty value.
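As a rough, hypothetical sketch of how a test file can honor this convention, the example below switches between recording, replaying, and skipping based on the `-record` flag, `-short` mode, and the environment variable. The package name is a placeholder and the helper that actually writes or serves the `.replay` file is omitted; the real wiring lives in each package's own test helpers.

```go
// Sketch only; not the repository's actual replay helper.
//
//   go test -record ./somepkg                                      # record RPCs into somepkg.replay
//   GCLOUD_TESTS_GOLANG_ENABLE_REPLAY=1 go test -short ./somepkg   # replay from somepkg.replay
package somepkg

import (
	"flag"
	"os"
	"testing"
)

var record = flag.Bool("record", false, "record RPCs to a .replay file")

func TestMain(m *testing.M) {
	flag.Parse()
	os.Exit(m.Run())
}

func TestIntegration(t *testing.T) {
	replayEnabled := os.Getenv("GCLOUD_TESTS_GOLANG_ENABLE_REPLAY") != ""

	switch {
	case *record:
		// Talk to the real service and write somepkg.replay (helper omitted).
		t.Log("recording live RPCs")
	case testing.Short() && replayEnabled:
		// Serve responses from the previously recorded somepkg.replay file.
		t.Log("replaying recorded RPCs")
	case testing.Short():
		t.Skip("integration test skipped in -short mode without replay")
	default:
		// Plain integration run against the live service.
		t.Log("running against live service")
	}
}
```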

## Contributor License Agreements

Before we can accept your pull requests you'll need to sign a Contributor
License Agreement (CLA):

- **If you are an individual writing original source code** and **you own the
intellectual property**, then you'll need to sign an [individual CLA][indvcla].
- **If you work for a company that wants to allow you to contribute your
work**, then you'll need to sign a [corporate CLA][corpcla].

You can sign these electronically (just scroll to the bottom). After that,
we'll be able to accept your pull requests.
|
||||
|
||||
## Contributor Code of Conduct
|
||||
|
||||
As contributors and maintainers of this project,
|
||||
and in the interest of fostering an open and welcoming community,
|
||||
we pledge to respect all people who contribute through reporting issues,
|
||||
posting feature requests, updating documentation,
|
||||
submitting pull requests or patches, and other activities.
|
||||
|
||||
We are committed to making participation in this project
|
||||
a harassment-free experience for everyone,
|
||||
regardless of level of experience, gender, gender identity and expression,
|
||||
sexual orientation, disability, personal appearance,
|
||||
body size, race, ethnicity, age, religion, or nationality.
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery
|
||||
* Personal attacks
|
||||
* Trolling or insulting/derogatory comments
|
||||
* Public or private harassment
|
||||
* Publishing other's private information,
|
||||
such as physical or electronic
|
||||
addresses, without explicit permission
|
||||
* Other unethical or unprofessional conduct.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct.
|
||||
By adopting this Code of Conduct,
|
||||
project maintainers commit themselves to fairly and consistently
|
||||
applying these principles to every aspect of managing this project.
|
||||
Project maintainers who do not follow or enforce the Code of Conduct
|
||||
may be permanently removed from the project team.
|
||||
|
||||
This code of conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community.
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior
|
||||
may be reported by opening an issue
|
||||
or contacting one or more of the project maintainers.
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant](https://contributor-covenant.org), version 1.2.0,
|
||||
available at [https://contributor-covenant.org/version/1/2/0/](https://contributor-covenant.org/version/1/2/0/)
|
||||
|
||||
[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/
|
||||
[indvcla]: https://developers.google.com/open-source/cla/individual
|
||||
[corpcla]: https://developers.google.com/open-source/cla/corporate
|
138 vendor/cloud.google.com/go/README.md generated vendored
@@ -1,138 +0,0 @@
# Google Cloud Client Libraries for Go

[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go.svg)](https://pkg.go.dev/cloud.google.com/go)

Go packages for [Google Cloud Platform](https://cloud.google.com) services.

``` go
import "cloud.google.com/go"
```

To install the packages on your system, *do not clone the repo*. Instead:

1. Change to your project directory:

```bash
cd /my/cloud/project
```
1. Get the package you want to use. Some products have their own module, so it's
best to `go get` the package(s) you want to use:

```
$ go get cloud.google.com/go/firestore # Replace with the package you want to use.
```

**NOTE:** Some of these packages are under development, and may occasionally
make backwards-incompatible changes.

## Supported APIs

For an updated list of all of our released APIs please see our
[reference docs](https://cloud.google.com/go/docs/reference).

## [Go Versions Supported](#supported-versions)

Our libraries are compatible with at least the three most recent, major Go
releases. They are currently compatible with:

- Go 1.18
- Go 1.17
- Go 1.16
- Go 1.15

## Authorization

By default, each API will use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials)
for authorization credentials used in calling the API endpoints. This will allow your
application to run in many environments without requiring explicit configuration.

[snip]:# (auth)
```go
client, err := storage.NewClient(ctx)
```

To authorize using a
[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys),
pass
[`option.WithCredentialsFile`](https://pkg.go.dev/google.golang.org/api/option#WithCredentialsFile)
to the `NewClient` function of the desired package. For example:

[snip]:# (auth-JSON)
```go
client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json"))
```

You can exert more control over authorization by using the
[`golang.org/x/oauth2`](https://pkg.go.dev/golang.org/x/oauth2) package to
create an `oauth2.TokenSource`. Then pass
[`option.WithTokenSource`](https://pkg.go.dev/google.golang.org/api/option#WithTokenSource)
to the `NewClient` function:
[snip]:# (auth-ts)
```go
tokenSource := ...
client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
```
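
The snippets above are deliberately abbreviated (`tokenSource := ...` is a placeholder). As a rough, self-contained sketch of how the three options fit together, the example below picks between a JSON key file, an explicit token source, and Application Default Credentials; the environment variable names and the bucket name are made-up placeholders, not part of the library.

```go
// Illustrative sketch only: how one program might choose among the three
// authorization options described above. MY_KEY_FILE, MY_ACCESS_TOKEN and
// "my-bucket" are placeholders.
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"cloud.google.com/go/storage"
	"golang.org/x/oauth2"
	"google.golang.org/api/option"
)

func newStorageClient(ctx context.Context) (*storage.Client, error) {
	// 1. Explicit JSON key file, if one is configured.
	if keyFile := os.Getenv("MY_KEY_FILE"); keyFile != "" {
		return storage.NewClient(ctx, option.WithCredentialsFile(keyFile))
	}
	// 2. Explicit OAuth2 token, e.g. obtained out of band.
	if tok := os.Getenv("MY_ACCESS_TOKEN"); tok != "" {
		ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: tok})
		return storage.NewClient(ctx, option.WithTokenSource(ts))
	}
	// 3. Fall back to Application Default Credentials.
	return storage.NewClient(ctx)
}

func main() {
	ctx := context.Background()
	client, err := newStorageClient(ctx)
	if err != nil {
		log.Fatalf("storage.NewClient: %v", err)
	}
	defer client.Close()

	attrs, err := client.Bucket("my-bucket").Attrs(ctx)
	if err != nil {
		log.Fatalf("Bucket.Attrs: %v", err)
	}
	fmt.Println("bucket location:", attrs.Location)
}
```

Whichever option is used, the returned value is the same `*storage.Client`, so the rest of the program does not change.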

## Contributing

Contributions are welcome. Please, see the
[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
document for details.

Please note that this project is released with a Contributor Code of Conduct.
By participating in this project you agree to abide by its terms.
See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
for more information.

[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory
|
||||
[cloud-automl]: https://cloud.google.com/automl
|
||||
[cloud-build]: https://cloud.google.com/cloud-build/
|
||||
[cloud-bigquery]: https://cloud.google.com/bigquery/
|
||||
[cloud-bigtable]: https://cloud.google.com/bigtable/
|
||||
[cloud-compute]: https://cloud.google.com/compute
|
||||
[cloud-container]: https://cloud.google.com/containers/
|
||||
[cloud-containeranalysis]: https://cloud.google.com/container-registry/docs/container-analysis
|
||||
[cloud-dataproc]: https://cloud.google.com/dataproc/
|
||||
[cloud-datastore]: https://cloud.google.com/datastore/
|
||||
[cloud-dialogflow]: https://cloud.google.com/dialogflow-enterprise/
|
||||
[cloud-debugger]: https://cloud.google.com/debugger/
|
||||
[cloud-dlp]: https://cloud.google.com/dlp/
|
||||
[cloud-errors]: https://cloud.google.com/error-reporting/
|
||||
[cloud-firestore]: https://cloud.google.com/firestore/
|
||||
[cloud-iam]: https://cloud.google.com/iam/
|
||||
[cloud-iot]: https://cloud.google.com/iot-core/
|
||||
[cloud-irm]: https://cloud.google.com/incident-response/docs/concepts
|
||||
[cloud-kms]: https://cloud.google.com/kms/
|
||||
[cloud-pubsub]: https://cloud.google.com/pubsub/
|
||||
[cloud-pubsublite]: https://cloud.google.com/pubsub/lite
|
||||
[cloud-storage]: https://cloud.google.com/storage/
|
||||
[cloud-language]: https://cloud.google.com/natural-language
|
||||
[cloud-logging]: https://cloud.google.com/logging/
|
||||
[cloud-natural-language]: https://cloud.google.com/natural-language/
|
||||
[cloud-memorystore]: https://cloud.google.com/memorystore/
|
||||
[cloud-monitoring]: https://cloud.google.com/monitoring/
|
||||
[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest
|
||||
[cloud-phishingprotection]: https://cloud.google.com/phishing-protection/
|
||||
[cloud-securitycenter]: https://cloud.google.com/security-command-center/
|
||||
[cloud-scheduler]: https://cloud.google.com/scheduler
|
||||
[cloud-spanner]: https://cloud.google.com/spanner/
|
||||
[cloud-speech]: https://cloud.google.com/speech
|
||||
[cloud-talent]: https://cloud.google.com/solutions/talent-solution/
|
||||
[cloud-tasks]: https://cloud.google.com/tasks/
|
||||
[cloud-texttospeech]: https://cloud.google.com/texttospeech/
|
||||
[cloud-talent]: https://cloud.google.com/solutions/talent-solution/
|
||||
[cloud-trace]: https://cloud.google.com/trace/
|
||||
[cloud-translate]: https://cloud.google.com/translate
|
||||
[cloud-recaptcha]: https://cloud.google.com/recaptcha-enterprise/
|
||||
[cloud-recommender]: https://cloud.google.com/recommendations/
|
||||
[cloud-video]: https://cloud.google.com/video-intelligence/
|
||||
[cloud-vision]: https://cloud.google.com/vision
|
||||
[cloud-webrisk]: https://cloud.google.com/web-risk/
|
||||
|
||||
## Links
|
||||
|
||||
- [Go on Google Cloud](https://cloud.google.com/go/home)
|
||||
- [Getting started with Go on Google Cloud](https://cloud.google.com/go/getting-started)
|
||||
- [App Engine Quickstart](https://cloud.google.com/appengine/docs/standard/go/quickstart)
|
||||
- [Cloud Functions Quickstart](https://cloud.google.com/functions/docs/quickstart-go)
|
||||
- [Cloud Run Quickstart](https://cloud.google.com/run/docs/quickstarts/build-and-deploy#go)
|
141 vendor/cloud.google.com/go/RELEASING.md generated vendored
@@ -1,141 +0,0 @@
# Releasing

## Determine which module to release

The Go client libraries have several modules. Each module does not strictly
correspond to a single library - they correspond to trees of directories. If a
file needs to be released, you must release the closest ancestor module.

To see all modules:

```bash
$ cat `find . -name go.mod` | grep module
module cloud.google.com/go/pubsub
module cloud.google.com/go/spanner
module cloud.google.com/go
module cloud.google.com/go/bigtable
module cloud.google.com/go/bigquery
module cloud.google.com/go/storage
module cloud.google.com/go/pubsublite
module cloud.google.com/go/firestore
module cloud.google.com/go/logging
module cloud.google.com/go/internal/gapicgen
module cloud.google.com/go/internal/godocfx
module cloud.google.com/go/internal/examples/fake
module cloud.google.com/go/internal/examples/mock
module cloud.google.com/go/datastore
```

The `cloud.google.com/go` is the repository root module. Each other module is
a submodule.

So, if you need to release a change in `bigtable/bttest/inmem.go`, the closest
ancestor module is `cloud.google.com/go/bigtable` - so you should release a new
version of the `cloud.google.com/go/bigtable` submodule.

If you need to release a change in `asset/apiv1/asset_client.go`, the closest
ancestor module is `cloud.google.com/go` - so you should release a new version
of the `cloud.google.com/go` repository root module. Note: releasing
`cloud.google.com/go` has no impact on any of the submodules, and vice-versa.
They are released entirely independently.
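
The "closest ancestor module" rule can be made concrete with a small, illustrative helper (not part of the repository's tooling) that walks up from a changed file until it finds a `go.mod`:

```go
// Illustrative helper: given a changed file, walk up the directory tree to
// find the closest enclosing go.mod, i.e. the module that would need a release.
// Run from the repository root; the example path below is the one used above.
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func closestModule(file string) (string, error) {
	dir := filepath.Dir(file)
	for {
		if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil {
			return dir, nil
		}
		parent := filepath.Dir(dir)
		if parent == dir {
			return "", fmt.Errorf("no go.mod found above %s", file)
		}
		dir = parent
	}
}

func main() {
	dir, err := closestModule("bigtable/bttest/inmem.go")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("release module rooted at:", dir) // prints "bigtable" from the repo root
}
```

For `bigtable/bttest/inmem.go` this stops at `bigtable/`, i.e. the `cloud.google.com/go/bigtable` submodule; for `asset/apiv1/asset_client.go` it reaches the repository root, matching the two worked examples above.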

## Test failures

If there are any test failures in the Kokoro build, releases are blocked until
the failures have been resolved.
||||
|
||||
## How to release
|
||||
|
||||
### Automated Releases (`cloud.google.com/go` and submodules)
|
||||
|
||||
We now use [release-please](https://github.com/googleapis/release-please) to
|
||||
perform automated releases for `cloud.google.com/go` and all submodules.
|
||||
|
||||
1. If there are changes that have not yet been released, a
|
||||
[pull request](https://github.com/googleapis/google-cloud-go/pull/2971) will
|
||||
be automatically opened by release-please
|
||||
with a title like "chore: release X.Y.Z" (for the root module) or
|
||||
"chore: release datastore X.Y.Z" (for the datastore submodule), where X.Y.Z
|
||||
is the next version to be released. Find the desired pull request
|
||||
[here](https://github.com/googleapis/google-cloud-go/pulls)
|
||||
1. Check for failures in the
|
||||
[continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
|
||||
any failures in the most recent build, address them before proceeding with
|
||||
the release. (This applies even if the failures are in a different submodule
|
||||
from the one being released.)
|
||||
1. Review the release notes. These are automatically generated from the titles
|
||||
of any merged commits since the previous release. If you would like to edit
|
||||
them, this can be done by updating the changes in the release PR.
|
||||
1. To cut a release, approve and merge the pull request. Doing so will
|
||||
update the `CHANGES.md`, tag the merged commit with the appropriate version,
|
||||
and draft a GitHub release which will copy the notes from `CHANGES.md`.
|
||||
|
||||
### Manual Release (`cloud.google.com/go`)
|
||||
|
||||
If for whatever reason the automated release process is not working as expected,
|
||||
here is how to manually cut a release of `cloud.google.com/go`.
|
||||
|
||||
1. Check for failures in the
|
||||
[continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
|
||||
any failures in the most recent build, address them before proceeding with
|
||||
the release.
|
||||
1. Navigate to `google-cloud-go/` and switch to main.
|
||||
1. `git pull`
|
||||
1. Run `git tag -l | grep -v beta | grep -v alpha` to see all existing releases.
|
||||
The current latest tag `$CV` is the largest tag. It should look something
|
||||
like `vX.Y.Z` (note: ignore all `LIB/vX.Y.Z` tags - these are tags for a
|
||||
specific library, not the module root). We'll call the current version `$CV`
|
||||
and the new version `$NV`.
|
||||
1. On main, run `git log $CV...` to list all the changes since the last
|
||||
release. NOTE: You must manually visually parse out changes to submodules [1]
|
||||
(the `git log` is going to show you things in submodules, which are not going
|
||||
to be part of your release).
|
||||
1. Edit `CHANGES.md` to include a summary of the changes.
|
||||
1. In `internal/version/version.go`, update `const Repo` to today's date with
|
||||
the format `YYYYMMDD`.
|
||||
1. In `internal/version` run `go generate`.
|
||||
1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork,
|
||||
and create a PR titled `chore: release $NV`.
|
||||
1. Wait for the PR to be reviewed and merged. Once it's merged, and without
|
||||
merging any other PRs in the meantime:
|
||||
a. Switch to main.
|
||||
b. `git pull`
|
||||
c. Tag the repo with the next version: `git tag $NV`.
|
||||
d. Push the tag to origin:
|
||||
`git push origin $NV`
|
||||
1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
|
||||
with the new release, copying the contents of `CHANGES.md`.
|
||||
|
||||
### Manual Releases (submodules)
|
||||
|
||||
If for whatever reason the automated release process is not working as expected,
|
||||
here is how to manually cut a release of a submodule.
|
||||
|
||||
(these instructions assume we're releasing `cloud.google.com/go/datastore` - adjust accordingly)
|
||||
|
||||
1. Check for failures in the
|
||||
[continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
|
||||
any failures in the most recent build, address them before proceeding with
|
||||
the release. (This applies even if the failures are in a different submodule
|
||||
from the one being released.)
|
||||
1. Navigate to `google-cloud-go/` and switch to main.
|
||||
1. `git pull`
|
||||
1. Run `git tag -l | grep datastore | grep -v beta | grep -v alpha` to see all
|
||||
existing releases. The current latest tag `$CV` is the largest tag. It
|
||||
should look something like `datastore/vX.Y.Z`. We'll call the current version
|
||||
`$CV` and the new version `$NV`.
|
||||
1. On main, run `git log $CV.. -- datastore/` to list all the changes to the
|
||||
submodule directory since the last release.
|
||||
1. Edit `datastore/CHANGES.md` to include a summary of the changes.
|
||||
1. In `internal/version` run `go generate`.
|
||||
1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork,
|
||||
and create a PR titled `chore(datastore): release $NV`.
|
||||
1. Wait for the PR to be reviewed and merged. Once it's merged, and without
|
||||
merging any other PRs in the meantime:
|
||||
a. Switch to main.
|
||||
b. `git pull`
|
||||
c. Tag the repo with the next version: `git tag $NV`.
|
||||
d. Push the tag to origin:
|
||||
`git push origin $NV`
|
||||
1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
|
||||
with the new release, copying the contents of `datastore/CHANGES.md`.
|
vendor/cloud.google.com/go/SECURITY.md (generated, vendored): 7 deleted lines
@@ -1,7 +0,0 @@
# Security Policy

To report a security issue, please use [g.co/vulnz](https://g.co/vulnz).

The Google Security Team will respond within 5 working days of your report on g.co/vulnz.

We use g.co/vulnz for our intake, and do coordination and disclosure here using GitHub Security Advisory to privately discuss and fix the issue.
|
|
@@ -1,10 +1,10 @@
// Copyright 2021 Google LLC
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// package adapters provides helper functions for the google.type protobuf
// messages (Decimal, Fraction, etc.).
package adapters
package internal

// Version is the current tagged release of the library.
const Version = "1.12.1"
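As a hypothetical illustration (not part of the vendored change, and with made-up names): a module-local version constant like the one introduced above is typically folded into client telemetry such as a User-Agent string.

```go
package main

import "fmt"

// version mirrors the kind of constant introduced in the hunk above.
const version = "1.12.1"

// userAgent stamps the module version into a client identification string.
// The exact format is illustrative only.
func userAgent(component string) string {
	return fmt.Sprintf("example-go-client/%s %s", version, component)
}

func main() {
	fmt.Println(userAgent("compute/metadata"))
}
```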
|
vendor/cloud.google.com/go/compute/metadata/CHANGES.md (generated, vendored, new file): 5 lines
@@ -0,0 +1,5 @@
# Changes

## [0.1.0] (2022-10-26)

Initial release of metadata as its own module.
|
|
@@ -193,7 +193,7 @@
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

https://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
|
vendor/cloud.google.com/go/compute/metadata/README.md (generated, vendored, new file): 27 lines
@@ -0,0 +1,27 @@
# Compute API

[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/compute.svg)](https://pkg.go.dev/cloud.google.com/go/compute/metadata)

This is a utility library for communicating with the Google Cloud metadata service from within Google Cloud.

## Install

```bash
go get cloud.google.com/go/compute/metadata
```

## Go Version Support

See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported)
section in the root directory's README.

## Contributing

Contributions are welcome. Please see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
document for details.

Please note that this project is released with a Contributor Code of Conduct.
By participating in this project you agree to abide by its terms. See
[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
for more information.
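As a minimal usage sketch of the package this README introduces (assuming the process actually runs on Google Cloud, where the metadata service is reachable), the documented top-level helpers look roughly like this:

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// OnGCE reports whether the process appears to be running on Google Cloud.
	if !metadata.OnGCE() {
		log.Fatal("not running on Google Cloud; metadata service unavailable")
	}

	// Look up a few common values from the metadata service.
	project, err := metadata.ProjectID()
	if err != nil {
		log.Fatal(err)
	}
	zone, err := metadata.Zone()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("project:", project, "zone:", zone)
}
```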
|
vendor/cloud.google.com/go/compute/metadata/metadata.go (generated, vendored): 1 changed line
@@ -71,6 +71,7 @@ func newDefaultHTTPClient() *http.Client {
				KeepAlive: 30 * time.Second,
			}).Dial,
		},
		Timeout: 5 * time.Second,
	}
}
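The hunk above adds an overall `Timeout` to the package's default HTTP client, so metadata lookups fail fast instead of hanging when the metadata server is unreachable. A rough sketch of a client built the same way (the 2-second dial timeout is an assumption for illustration; only the keep-alive and the new 5-second timeout appear in the hunk):

```go
package main

import (
	"net"
	"net/http"
	"time"
)

// newClient mirrors the shape of the client in the hunk above: a dialer with
// a keep-alive, plus an overall per-request timeout.
func newClient() *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			Dial: (&net.Dialer{
				Timeout:   2 * time.Second, // assumed value, not shown in the hunk
				KeepAlive: 30 * time.Second,
			}).Dial,
		},
		Timeout: 5 * time.Second, // the overall timeout added by this change
	}
}

func main() {
	_ = newClient()
}
```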
|
||||
|
||||
|
|
|
@@ -12,12 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// This file, and the cloud.google.com/go import, won't actually become part of
// This file, and the {{.RootMod}} import, won't actually become part of
// the resultant binary.
//go:build modhack
// +build modhack

package iam
package metadata

// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
import _ "cloud.google.com/go"
import _ "cloud.google.com/go/compute/internal"
|
vendor/cloud.google.com/go/doc.go (generated, vendored): 227 deleted lines
@@ -1,227 +0,0 @@
// Copyright 2014 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package cloud is the root of the packages used to access Google Cloud
|
||||
Services. See https://godoc.org/cloud.google.com/go for a full list
|
||||
of sub-packages.
|
||||
|
||||
|
||||
Client Options
|
||||
|
||||
All clients in sub-packages are configurable via client options. These options are
|
||||
described here: https://godoc.org/google.golang.org/api/option.
|
||||
|
||||
|
||||
Authentication and Authorization
|
||||
|
||||
All the clients in sub-packages support authentication via Google Application Default
|
||||
Credentials (see https://cloud.google.com/docs/authentication/production), or
|
||||
by providing a JSON key file for a Service Account. See examples below.
|
||||
|
||||
Google Application Default Credentials (ADC) is the recommended way to authorize
|
||||
and authenticate clients. For information on how to create and obtain
|
||||
Application Default Credentials, see
|
||||
https://cloud.google.com/docs/authentication/production. Here is an example
|
||||
of a client using ADC to authenticate:
|
||||
client, err := secretmanager.NewClient(context.Background())
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
_ = client // Use the client.
|
||||
|
||||
You can use a file with credentials to authenticate and authorize, such as a JSON
|
||||
key file associated with a Google service account. Service Account keys can be
|
||||
created and downloaded from
|
||||
https://console.cloud.google.com/iam-admin/serviceaccounts. This example uses
|
||||
the Secret Manager client, but the same steps apply to the other client libraries
|
||||
underneath this package. Example:
|
||||
client, err := secretmanager.NewClient(context.Background(),
|
||||
option.WithCredentialsFile("/path/to/service-account-key.json"))
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
_ = client // Use the client.
|
||||
|
||||
In some cases (for instance, you don't want to store secrets on disk), you can
|
||||
create credentials from in-memory JSON and use the WithCredentials option.
|
||||
The google package in this example is at golang.org/x/oauth2/google.
|
||||
This example uses the Secret Manager client, but the same steps apply to
|
||||
the other client libraries underneath this package. Note that scopes can be
|
||||
found at https://developers.google.com/identity/protocols/oauth2/scopes, and
|
||||
are also provided in all auto-generated libraries: for example,
|
||||
cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example:
|
||||
ctx := context.Background()
|
||||
creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds))
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
_ = client // Use the client.
|
||||
|
||||
|
||||
Timeouts and Cancellation
|
||||
|
||||
By default, non-streaming methods, like Create or Get, will have a default deadline applied to the
|
||||
context provided at call time, unless a context deadline is already set. Streaming
|
||||
methods have no default deadline and will run indefinitely. To set timeouts or
|
||||
arrange for cancellation, use contexts. Transient
|
||||
errors will be retried when correctness allows.
|
||||
|
||||
Here is an example of how to set a timeout for an RPC using context.WithTimeout:
|
||||
ctx := context.Background()
|
||||
// Do not set a timeout on the context passed to NewClient: dialing happens
|
||||
// asynchronously, and the context is used to refresh credentials in the
|
||||
// background.
|
||||
client, err := secretmanager.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
// Time out if it takes more than 10 seconds to delete the secret.
|
||||
tctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel() // Always call cancel.
|
||||
|
||||
req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"}
|
||||
if err := client.DeleteSecret(tctx, req); err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
|
||||
Here is an example of how to arrange for an RPC to be canceled using context.WithCancel:
|
||||
ctx := context.Background()
|
||||
// Do not cancel the context passed to NewClient: dialing happens asynchronously,
|
||||
// and the context is used to refresh credentials in the background.
|
||||
client, err := secretmanager.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
cctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel() // Always call cancel.
|
||||
|
||||
// TODO: Make the cancel function available to whatever might want to cancel the
|
||||
// call--perhaps a GUI button.
|
||||
req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"}
|
||||
if err := client.DeleteSecret(cctx, req); err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
|
||||
To opt out of default deadlines, set the temporary environment variable
|
||||
GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE to "true" prior to client
|
||||
creation. This affects all Google Cloud Go client libraries. This opt-out
|
||||
mechanism will be removed in a future release. File an issue at
|
||||
https://github.com/googleapis/google-cloud-go if the default deadlines
|
||||
cannot work for you.
|
||||
|
||||
Do not attempt to control the initial connection (dialing) of a service by setting a
|
||||
timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts
|
||||
would be ineffective and would only interfere with credential refreshing, which uses
|
||||
the same context.
|
||||
|
||||
|
||||
Connection Pooling
|
||||
|
||||
Connection pooling differs in clients based on their transport. Cloud
|
||||
clients either rely on HTTP or gRPC transports to communicate
|
||||
with Google Cloud.
|
||||
|
||||
Cloud clients that use HTTP (bigquery, compute, storage, and translate) rely on the
|
||||
underlying HTTP transport to cache connections for later re-use. These are cached to
|
||||
the default http.MaxIdleConns and http.MaxIdleConnsPerHost settings in
|
||||
http.DefaultTransport.
|
||||
|
||||
For gRPC clients (all others in this repo), connection pooling is configurable. Users
|
||||
of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a client
|
||||
option to NewClient calls. This configures the underlying gRPC connections to be
|
||||
pooled and addressed in a round robin fashion.
|
||||
|
||||
|
||||
Using the Libraries with Docker
|
||||
|
||||
Minimal docker images like Alpine lack CA certificates. This causes RPCs to appear to
|
||||
hang, because gRPC retries indefinitely. See https://github.com/googleapis/google-cloud-go/issues/928
|
||||
for more information.
|
||||
|
||||
|
||||
Debugging
|
||||
|
||||
To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See
|
||||
https://godoc.org/google.golang.org/grpc/grpclog for more information.
|
||||
|
||||
For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2".
|
||||
|
||||
|
||||
Inspecting errors
|
||||
|
||||
Most of the errors returned by the generated clients are wrapped in an
|
||||
`apierror.APIError` (https://pkg.go.dev/github.com/googleapis/gax-go/v2/apierror)
|
||||
and can be further unwrapped into a `grpc.Status` or `googleapi.Error` depending
|
||||
on the transport used to make the call (gRPC or REST). Converting your errors to
|
||||
these types can be a useful way to get more information about what went wrong
|
||||
while debugging.
|
||||
|
||||
`apierror.APIError` gives access to specific details in the
|
||||
error. The transport-specific errors can still be unwrapped using the
|
||||
`apierror.APIError`.
|
||||
if err != nil {
|
||||
var ae *apierror.APIError
|
||||
if errors.As(err, &ae) {
|
||||
log.Println(ae.Reason())
|
||||
log.Println(ae.Details().Help.GetLinks())
|
||||
}
|
||||
}
|
||||
|
||||
If the gRPC transport was used, the `grpc.Status` can still be parsed using the
|
||||
`status.FromError` function.
|
||||
if err != nil {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
log.Println(s.Message())
|
||||
for _, d := range s.Proto().Details {
|
||||
log.Println(d)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
If the REST transport was used, the `googleapi.Error` can be parsed in a similar
|
||||
way.
|
||||
if err != nil {
|
||||
var gerr *googleapi.Error
|
||||
if errors.As(err, &gerr) {
|
||||
log.Println(gerr.Message)
|
||||
}
|
||||
}
|
||||
|
||||
Client Stability
|
||||
|
||||
Clients in this repository are considered alpha or beta unless otherwise
|
||||
marked as stable in the README.md. Semver is not used to communicate stability
|
||||
of clients.
|
||||
|
||||
Alpha and beta clients may change or go away without notice.
|
||||
|
||||
Clients marked stable will maintain compatibility with future versions for as
|
||||
long as we can reasonably sustain. Incompatible changes might be made in some
|
||||
situations, including:
|
||||
|
||||
- Security bugs may prompt backwards-incompatible changes.
|
||||
|
||||
- Situations in which components are no longer feasible to maintain without
|
||||
making breaking changes, including removal.
|
||||
|
||||
- Parts of the client surface may be outright unstable and subject to change.
|
||||
These parts of the surface will be labeled with the note, "It is EXPERIMENTAL
|
||||
and subject to change or removal without notice."
|
||||
*/
|
||||
package cloud // import "cloud.google.com/go"
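Pulling the doc.go guidance above together, here is a hedged end-to-end sketch: Application Default Credentials, a per-call timeout, and apierror unwrapping. The import paths follow the genproto layout current for this vendored snapshot and may differ in later releases; the project and secret names are placeholders.

```go
package main

import (
	"context"
	"errors"
	"log"
	"time"

	secretmanager "cloud.google.com/go/secretmanager/apiv1"
	"github.com/googleapis/gax-go/v2/apierror"
	secretmanagerpb "google.golang.org/genproto/googleapis/cloud/secretmanager/v1"
)

func main() {
	// Do not set a timeout on the context passed to NewClient: dialing happens
	// asynchronously, and the context is also used to refresh credentials.
	ctx := context.Background()
	client, err := secretmanager.NewClient(ctx) // authenticates via ADC
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Apply a per-call timeout, as recommended above.
	tctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"}
	if err := client.DeleteSecret(tctx, req); err != nil {
		// Unwrap into an apierror.APIError for structured details.
		var ae *apierror.APIError
		if errors.As(err, &ae) {
			log.Println("reason:", ae.Reason())
		}
		log.Fatal(err)
	}
}
```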
|
vendor/cloud.google.com/go/iam/CHANGES.md (generated, vendored): 28 changed lines
@@ -1,5 +1,33 @@
# Changes

## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.6.0...iam/v0.7.0) (2022-11-03)

### Features

* **iam:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad))

## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.5.0...iam/v0.6.0) (2022-10-25)

### Features

* **iam:** start generating stubs dir ([de2d180](https://github.com/googleapis/google-cloud-go/commit/de2d18066dc613b72f6f8db93ca60146dabcfdcc))

## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.4.0...iam/v0.5.0) (2022-09-28)

### Features

* **iam:** remove ListApplicablePolicies ([52dddd1](https://github.com/googleapis/google-cloud-go/commit/52dddd1ed89fbe77e1859311c3b993a77a82bfc7))

## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.3.0...iam/v0.4.0) (2022-09-06)

### Features

* **iam:** start generating apiv2 ([#6605](https://github.com/googleapis/google-cloud-go/issues/6605)) ([a6004e7](https://github.com/googleapis/google-cloud-go/commit/a6004e762f782869cd85688937475744f7b17e50))

## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.2.0...iam/v0.3.0) (2022-02-23)
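The changelog above tracks the module that backs IAM handles across the Cloud clients. As a hedged usage sketch (the bucket name is a placeholder), a storage bucket's IAM policy is read through that package's `Handle`:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Bucket IAM is exposed through an *iam.Handle from cloud.google.com/go/iam.
	policy, err := client.Bucket("my-bucket").IAM().Policy(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, role := range policy.Roles() {
		fmt.Println(role, policy.Members(role))
	}
}
```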
|
||||
|
||||
|
||||
|
|
vendor/cloud.google.com/go/internal/.repo-metadata-full.json (generated, vendored): 237 changed lines
|
@ -62,6 +62,24 @@
|
|||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/apigeeregistry/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/apigeeregistry/apiv1",
|
||||
"description": "Apigee Registry API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeregistry/latest/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/apikeys/apiv2": {
|
||||
"distribution_name": "cloud.google.com/go/apikeys/apiv2",
|
||||
"description": "API Keys API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apikeys/latest/apiv2",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/appengine/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/appengine/apiv1",
|
||||
"description": "App Engine Admin API",
|
||||
|
@ -80,6 +98,15 @@
|
|||
"release_level": "alpha",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/artifactregistry/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/artifactregistry/apiv1",
|
||||
"description": "Artifact Registry API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/artifactregistry/apiv1beta2": {
|
||||
"distribution_name": "cloud.google.com/go/artifactregistry/apiv1beta2",
|
||||
"description": "Artifact Registry API",
|
||||
|
@ -170,6 +197,51 @@
|
|||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/beyondcorp/appconnections/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/beyondcorp/appconnections/apiv1",
|
||||
"description": "BeyondCorp API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnections/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/beyondcorp/appconnectors/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/beyondcorp/appconnectors/apiv1",
|
||||
"description": "BeyondCorp API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnectors/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/beyondcorp/appgateways/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/beyondcorp/appgateways/apiv1",
|
||||
"description": "BeyondCorp API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appgateways/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1",
|
||||
"description": "BeyondCorp API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientconnectorservices/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/beyondcorp/clientgateways/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/beyondcorp/clientgateways/apiv1",
|
||||
"description": "BeyondCorp API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientgateways/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/bigquery": {
|
||||
"distribution_name": "cloud.google.com/go/bigquery",
|
||||
"description": "BigQuery",
|
||||
|
@ -179,6 +251,15 @@
|
|||
"release_level": "ga",
|
||||
"library_type": "GAPIC_MANUAL"
|
||||
},
|
||||
"cloud.google.com/go/bigquery/analyticshub/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/bigquery/analyticshub/apiv1",
|
||||
"description": "Analytics Hub API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/analyticshub/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/bigquery/connection/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/bigquery/connection/apiv1",
|
||||
"description": "BigQuery Connection API",
|
||||
|
@ -206,6 +287,15 @@
|
|||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/bigquery/datapolicies/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/bigquery/datapolicies/apiv1beta1",
|
||||
"description": "BigQuery Data Policy API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/bigquery/datatransfer/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/bigquery/datatransfer/apiv1",
|
||||
"description": "BigQuery Data Transfer API",
|
||||
|
@ -242,15 +332,6 @@
|
|||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/bigquery/reservation/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/bigquery/reservation/apiv1beta1",
|
||||
"description": "BigQuery Reservation API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/reservation/apiv1beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/bigquery/storage/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/bigquery/storage/apiv1",
|
||||
"description": "BigQuery Storage API",
|
||||
|
@ -338,7 +419,7 @@
|
|||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/certificatemanager/latest/apiv1",
|
||||
"release_level": "beta",
|
||||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/channel/apiv1": {
|
||||
|
@ -467,6 +548,24 @@
|
|||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/dataform/apiv1alpha2": {
|
||||
"distribution_name": "cloud.google.com/go/dataform/apiv1alpha2",
|
||||
"description": "Dataform API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1alpha2",
|
||||
"release_level": "alpha",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/dataform/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/dataform/apiv1beta1",
|
||||
"description": "Dataform API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/datafusion/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/datafusion/apiv1",
|
||||
"description": "Cloud Data Fusion API",
|
||||
|
@ -575,6 +674,15 @@
|
|||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/dialogflow/apiv2beta1": {
|
||||
"distribution_name": "cloud.google.com/go/dialogflow/apiv2beta1",
|
||||
"description": "Dialogflow API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/dialogflow/cx/apiv3": {
|
||||
"distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3",
|
||||
"description": "Dialogflow API",
|
||||
|
@ -629,6 +737,15 @@
|
|||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/edgecontainer/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/edgecontainer/apiv1",
|
||||
"description": "Distributed Cloud Edge Container API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/edgecontainer/latest/apiv1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/errorreporting": {
|
||||
"distribution_name": "cloud.google.com/go/errorreporting",
|
||||
"description": "Cloud Error Reporting API",
|
||||
|
@ -719,6 +836,24 @@
|
|||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/functions/apiv2": {
|
||||
"distribution_name": "cloud.google.com/go/functions/apiv2",
|
||||
"description": "Cloud Functions API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/functions/apiv2beta": {
|
||||
"distribution_name": "cloud.google.com/go/functions/apiv2beta",
|
||||
"description": "Cloud Functions API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2beta",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/functions/metadata": {
|
||||
"distribution_name": "cloud.google.com/go/functions/metadata",
|
||||
"description": "Cloud Functions",
|
||||
|
@ -800,6 +935,15 @@
|
|||
"release_level": "ga",
|
||||
"library_type": "CORE"
|
||||
},
|
||||
"cloud.google.com/go/iam/apiv2": {
|
||||
"distribution_name": "cloud.google.com/go/iam/apiv2",
|
||||
"description": "Identity and Access Management (IAM) API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/apiv2",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/iam/credentials/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/iam/credentials/apiv1",
|
||||
"description": "IAM Service Account Credentials API",
|
||||
|
@ -856,7 +1000,7 @@
|
|||
},
|
||||
"cloud.google.com/go/language/apiv1beta2": {
|
||||
"distribution_name": "cloud.google.com/go/language/apiv1beta2",
|
||||
"description": "Google Cloud Natural Language API",
|
||||
"description": "Cloud Natural Language API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv1beta2",
|
||||
|
@ -895,7 +1039,7 @@
|
|||
"description": "Long Running Operations API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/longrunning/autogen",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/longrunning/latest/autogen",
|
||||
"release_level": "alpha",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
|
@ -1193,7 +1337,7 @@
|
|||
"language": "Go",
|
||||
"client_library_type": "manual",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest",
|
||||
"release_level": "beta",
|
||||
"release_level": "ga",
|
||||
"library_type": "GAPIC_MANUAL"
|
||||
},
|
||||
"cloud.google.com/go/pubsublite/apiv1": {
|
||||
|
@ -1205,15 +1349,6 @@
|
|||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/recaptchaenterprise/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/recaptchaenterprise/apiv1beta1",
|
||||
"description": "reCAPTCHA Enterprise API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/latest/apiv1beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/recaptchaenterprise/v2/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1",
|
||||
"description": "reCAPTCHA Enterprise API",
|
||||
|
@ -1223,6 +1358,15 @@
|
|||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1",
|
||||
"description": "reCAPTCHA Enterprise API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/recommendationengine/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/recommendationengine/apiv1beta1",
|
||||
"description": "Recommendations AI",
|
||||
|
@ -1367,15 +1511,6 @@
|
|||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/secretmanager/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/secretmanager/apiv1beta1",
|
||||
"description": "Secret Manager API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/secretmanager/latest/apiv1beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/security/privateca/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/security/privateca/apiv1",
|
||||
"description": "Certificate Authority API",
|
||||
|
@ -1394,6 +1529,15 @@
|
|||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/security/publicca/apiv1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/security/publicca/apiv1beta1",
|
||||
"description": "Public Certificate Authority API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/publicca/apiv1beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/securitycenter/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/securitycenter/apiv1",
|
||||
"description": "Security Command Center API",
|
||||
|
@ -1495,7 +1639,7 @@
|
|||
},
|
||||
"cloud.google.com/go/spanner/admin/database/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/spanner/admin/database/apiv1",
|
||||
"description": "Cloud Spanner Database Admin API",
|
||||
"description": "Cloud Spanner API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/admin/database/apiv1",
|
||||
|
@ -1538,6 +1682,15 @@
|
|||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/speech/apiv2": {
|
||||
"distribution_name": "cloud.google.com/go/speech/apiv2",
|
||||
"description": "Cloud Speech-to-Text API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv2",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/storage": {
|
||||
"distribution_name": "cloud.google.com/go/storage",
|
||||
"description": "Cloud Storage (GCS)",
|
||||
|
@ -1571,7 +1724,7 @@
|
|||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/talent/latest/apiv4",
|
||||
"release_level": "beta",
|
||||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/talent/apiv4beta1": {
|
||||
|
@ -1682,15 +1835,6 @@
|
|||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/vision/apiv1p1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/vision/apiv1p1beta1",
|
||||
"description": "Cloud Vision API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/latest/apiv1p1beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/vision/v2/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/vision/v2/apiv1",
|
||||
"description": "Cloud Vision API",
|
||||
|
@ -1700,6 +1844,15 @@
|
|||
"release_level": "ga",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/vision/v2/apiv1p1beta1": {
|
||||
"distribution_name": "cloud.google.com/go/vision/v2/apiv1p1beta1",
|
||||
"description": "Cloud Vision API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1p1beta1",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/vmmigration/apiv1": {
|
||||
"distribution_name": "cloud.google.com/go/vmmigration/apiv1",
|
||||
"description": "VM Migration API",
|
||||
|
|
vendor/cloud.google.com/go/internal/annotate.go (generated, vendored): 3 changed lines
@@ -31,7 +31,8 @@ import (
// - "google.golang.org/api/googleapi".Error
// If the error is not one of these types, Annotate behaves
// like
// fmt.Errorf("%s: %v", msg, err)
//
// fmt.Errorf("%s: %v", msg, err)
func Annotate(err error, msg string) error {
	if err == nil {
		panic("Annotate called with nil")
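The doc comment above describes the fallback behavior of `Annotate`. Here is a hedged, stand-alone sketch of a helper with that fallback; it is not the vendored implementation, which also rewrites gRPC and googleapi errors in place.

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// annotate prefixes err with msg, falling back to fmt.Errorf("%s: %v", msg, err)
// for error types it does not recognize, as the doc comment above describes.
func annotate(err error, msg string) error {
	if err == nil {
		panic("annotate called with nil")
	}
	return fmt.Errorf("%s: %v", msg, err)
}

func main() {
	err := annotate(errors.New("permission denied"), "reading bucket metadata")
	log.Println(err) // reading bucket metadata: permission denied
}
```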
|
||||
|
|
vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json (generated, vendored): 322 deleted lines
@ -1,322 +0,0 @@
|
|||
{
|
||||
"release-type": "go-yoshi",
|
||||
"include-component-in-tag": true,
|
||||
"tag-separator": "/",
|
||||
"packages": {
|
||||
"accessapproval": {
|
||||
"component": "accessapproval"
|
||||
},
|
||||
"accesscontextmanager": {
|
||||
"component": "accesscontextmanager"
|
||||
},
|
||||
"aiplatform": {
|
||||
"component": "aiplatform"
|
||||
},
|
||||
"analytics": {
|
||||
"component": "analytics"
|
||||
},
|
||||
"apigateway": {
|
||||
"component": "apigateway"
|
||||
},
|
||||
"apigeeconnect": {
|
||||
"component": "apigeeconnect"
|
||||
},
|
||||
"appengine": {
|
||||
"component": "appengine"
|
||||
},
|
||||
"area120": {
|
||||
"component": "area120"
|
||||
},
|
||||
"artifactregistry": {
|
||||
"component": "artifactregistry"
|
||||
},
|
||||
"asset": {
|
||||
"component": "asset"
|
||||
},
|
||||
"assuredworkloads": {
|
||||
"component": "assuredworkloads"
|
||||
},
|
||||
"automl": {
|
||||
"component": "automl"
|
||||
},
|
||||
"baremetalsolution": {
|
||||
"component": "baremetalsolution"
|
||||
},
|
||||
"batch": {
|
||||
"component": "batch"
|
||||
},
|
||||
"billing": {
|
||||
"component": "billing"
|
||||
},
|
||||
"binaryauthorization": {
|
||||
"component": "binaryauthorization"
|
||||
},
|
||||
"certificatemanager": {
|
||||
"component": "certificatemanager"
|
||||
},
|
||||
"channel": {
|
||||
"component": "channel"
|
||||
},
|
||||
"cloudbuild": {
|
||||
"component": "cloudbuild"
|
||||
},
|
||||
"clouddms": {
|
||||
"component": "clouddms"
|
||||
},
|
||||
"cloudtasks": {
|
||||
"component": "cloudtasks"
|
||||
},
|
||||
"compute": {
|
||||
"component": "compute"
|
||||
},
|
||||
"contactcenterinsights": {
|
||||
"component": "contactcenterinsights"
|
||||
},
|
||||
"container": {
|
||||
"component": "container"
|
||||
},
|
||||
"containeranalysis": {
|
||||
"component": "containeranalysis"
|
||||
},
|
||||
"datacatalog": {
|
||||
"component": "datacatalog"
|
||||
},
|
||||
"dataflow": {
|
||||
"component": "dataflow"
|
||||
},
|
||||
"datafusion": {
|
||||
"component": "datafusion"
|
||||
},
|
||||
"datalabeling": {
|
||||
"component": "datalabeling"
|
||||
},
|
||||
"dataplex": {
|
||||
"component": "dataplex"
|
||||
},
|
||||
"dataproc": {
|
||||
"component": "dataproc"
|
||||
},
|
||||
"dataqna": {
|
||||
"component": "dataqna"
|
||||
},
|
||||
"datastream": {
|
||||
"component": "datastream"
|
||||
},
|
||||
"deploy": {
|
||||
"component": "deploy"
|
||||
},
|
||||
"dialogflow": {
|
||||
"component": "dialogflow"
|
||||
},
|
||||
"dlp": {
|
||||
"component": "dlp"
|
||||
},
|
||||
"documentai": {
|
||||
"component": "documentai"
|
||||
},
|
||||
"domains": {
|
||||
"component": "domains"
|
||||
},
|
||||
"essentialcontacts": {
|
||||
"component": "essentialcontacts"
|
||||
},
|
||||
"eventarc": {
|
||||
"component": "eventarc"
|
||||
},
|
||||
"filestore": {
|
||||
"component": "filestore"
|
||||
},
|
||||
"functions": {
|
||||
"component": "functions"
|
||||
},
|
||||
"gaming": {
|
||||
"component": "gaming"
|
||||
},
|
||||
"gkebackup": {
|
||||
"component": "gkebackup"
|
||||
},
|
||||
"gkeconnect": {
|
||||
"component": "gkeconnect"
|
||||
},
|
||||
"gkehub": {
|
||||
"component": "gkehub"
|
||||
},
|
||||
"gkemulticloud": {
|
||||
"component": "gkemulticloud"
|
||||
},
|
||||
"grafeas": {
|
||||
"component": "grafeas"
|
||||
},
|
||||
"gsuiteaddons": {
|
||||
"component": "gsuiteaddons"
|
||||
},
|
||||
"iam": {
|
||||
"component": "iam"
|
||||
},
|
||||
"iap": {
|
||||
"component": "iap"
|
||||
},
|
||||
"ids": {
|
||||
"component": "ids"
|
||||
},
|
||||
"iot": {
|
||||
"component": "iot"
|
||||
},
|
||||
"kms": {
|
||||
"component": "kms"
|
||||
},
|
||||
"language": {
|
||||
"component": "language"
|
||||
},
|
||||
"lifesciences": {
|
||||
"component": "lifesciences"
|
||||
},
|
||||
"managedidentities": {
|
||||
"component": "managedidentities"
|
||||
},
|
||||
"mediatranslation": {
|
||||
"component": "mediatranslation"
|
||||
},
|
||||
"memcache": {
|
||||
"component": "memcache"
|
||||
},
|
||||
"metastore": {
|
||||
"component": "metastore"
|
||||
},
|
||||
"monitoring": {
|
||||
"component": "monitoring"
|
||||
},
|
||||
"networkconnectivity": {
|
||||
"component": "networkconnectivity"
|
||||
},
|
||||
"networkmanagement": {
|
||||
"component": "networkmanagement"
|
||||
},
|
||||
"networksecurity": {
|
||||
"component": "networksecurity"
|
||||
},
|
||||
"notebooks": {
|
||||
"component": "notebooks"
|
||||
},
|
||||
"optimization": {
|
||||
"component": "optimization"
|
||||
},
|
||||
"orchestration": {
|
||||
"component": "orchestration"
|
||||
},
|
||||
"orgpolicy": {
|
||||
"component": "orgpolicy"
|
||||
},
|
||||
"osconfig": {
|
||||
"component": "osconfig"
|
||||
},
|
||||
"oslogin": {
|
||||
"component": "oslogin"
|
||||
},
|
||||
"phishingprotection": {
|
||||
"component": "phishingprotection"
|
||||
},
|
||||
"policytroubleshooter": {
|
||||
"component": "policytroubleshooter"
|
||||
},
|
||||
"privatecatalog": {
|
||||
"component": "privatecatalog"
|
||||
},
|
||||
"recaptchaenterprise/v2": {
|
||||
"component": "recaptchaenterprise"
|
||||
},
|
||||
"recommendationengine": {
|
||||
"component": "recommendationengine"
|
||||
},
|
||||
"recommender": {
|
||||
"component": "recommender"
|
||||
},
|
||||
"redis": {
|
||||
"component": "redis"
|
||||
},
|
||||
"resourcemanager": {
|
||||
"component": "resourcemanager"
|
||||
},
|
||||
"resourcesettings": {
|
||||
"component": "resourcesettings"
|
||||
},
|
||||
"retail": {
|
||||
"component": "retail"
|
||||
},
|
||||
"run": {
|
||||
"component": "run"
|
||||
},
|
||||
"scheduler": {
|
||||
"component": "scheduler"
|
||||
},
|
||||
"secretmanager": {
|
||||
"component": "secretmanager"
|
||||
},
|
||||
"security": {
|
||||
"component": "security"
|
||||
},
|
||||
"securitycenter": {
|
||||
"component": "securitycenter"
|
||||
},
|
||||
"servicecontrol": {
|
||||
"component": "servicecontrol"
|
||||
},
|
||||
"servicedirectory": {
|
||||
"component": "servicedirectory"
|
||||
},
|
||||
"servicemanagement": {
|
||||
"component": "servicemanagement"
|
||||
},
|
||||
"serviceusage": {
|
||||
"component": "serviceusage"
|
||||
},
|
||||
"shell": {
|
||||
"component": "shell"
|
||||
},
|
||||
"speech": {
|
||||
"component": "speech"
|
||||
},
|
||||
"storagetransfer": {
|
||||
"component": "storagetransfer"
|
||||
},
|
||||
"talent": {
|
||||
"component": "talent"
|
||||
},
|
||||
"texttospeech": {
|
||||
"component": "texttospeech"
|
||||
},
|
||||
"tpu": {
|
||||
"component": "tpu"
|
||||
},
|
||||
"trace": {
|
||||
"component": "trace"
|
||||
},
|
||||
"translate": {
|
||||
"component": "translate"
|
||||
},
|
||||
"video": {
|
||||
"component": "video"
|
||||
},
|
||||
"videointelligence": {
|
||||
"component": "videointelligence"
|
||||
},
|
||||
"vision/v2": {
|
||||
"component": "vision"
|
||||
},
|
||||
"vmmigration": {
|
||||
"component": "vmmigration"
|
||||
},
|
||||
"vpcaccess": {
|
||||
"component": "vpcaccess"
|
||||
},
|
||||
"webrisk": {
|
||||
"component": "webrisk"
|
||||
},
|
||||
"websecurityscanner": {
|
||||
"component": "websecurityscanner"
|
||||
},
|
||||
"workflows": {
|
||||
"component": "workflows"
|
||||
}
|
||||
}
|
||||
}
|
vendor/cloud.google.com/go/release-please-config.json (generated, vendored): 10 deleted lines
|
@ -1,10 +0,0 @@
|
|||
{
|
||||
"release-type": "go-yoshi",
|
||||
"separate-pull-requests": true,
|
||||
"include-component-in-tag": false,
|
||||
"packages": {
|
||||
".": {
|
||||
"component": "main"
|
||||
}
|
||||
}
|
||||
}
|
3
vendor/cloud.google.com/go/storage/.release-please-manifest.json
generated
vendored
3
vendor/cloud.google.com/go/storage/.release-please-manifest.json
generated
vendored
|
@ -1,3 +0,0 @@
|
|||
{
|
||||
"storage": "1.23.0"
|
||||
}
|
vendor/cloud.google.com/go/storage/CHANGES.md (generated, vendored): 53 changed lines
|
@ -1,6 +1,59 @@
|
|||
# Changes
|
||||
|
||||
|
||||
## [1.28.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.27.0...storage/v1.28.0) (2022-11-03)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage/internal:** Add routing annotations ([ce3f945](https://github.com/googleapis/google-cloud-go/commit/ce3f9458e511eca0910992763232abbcd64698f1))
|
||||
* **storage:** Add Autoclass support ([#6828](https://github.com/googleapis/google-cloud-go/issues/6828)) ([f7c7f41](https://github.com/googleapis/google-cloud-go/commit/f7c7f41e4d7fcffe05860e1114cb20f40c869da8))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** Fix read-write race in Writer.Write ([#6817](https://github.com/googleapis/google-cloud-go/issues/6817)) ([4766d3e](https://github.com/googleapis/google-cloud-go/commit/4766d3e1004119b93c6bd352024b5bf3404252eb))
|
||||
* **storage:** Fix request token passing for Copier.Run ([#6863](https://github.com/googleapis/google-cloud-go/issues/6863)) ([faaab06](https://github.com/googleapis/google-cloud-go/commit/faaab066d8e509dc440bcbc87391557ecee7dbf2)), refs [#6857](https://github.com/googleapis/google-cloud-go/issues/6857)
|
||||
|
||||
|
||||
### Documentation
|
||||
|
||||
* **storage:** Update broken links for SignURL and PostPolicy ([#6779](https://github.com/googleapis/google-cloud-go/issues/6779)) ([776138b](https://github.com/googleapis/google-cloud-go/commit/776138bc06a1e5fd45acbf8f9d36e9dc6ce31dd3))
|
||||
|
||||
## [1.27.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.26.0...storage/v1.27.0) (2022-09-22)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** Find GoogleAccessID when using impersonated creds ([#6591](https://github.com/googleapis/google-cloud-go/issues/6591)) ([a2d16a7](https://github.com/googleapis/google-cloud-go/commit/a2d16a7a778c85d13217fc67955ec5dac1da34e8))
|
||||
|
||||
## [1.26.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.25.0...storage/v1.26.0) (2022-08-29)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** export ShouldRetry ([#6370](https://github.com/googleapis/google-cloud-go/issues/6370)) ([0da9ab0](https://github.com/googleapis/google-cloud-go/commit/0da9ab0831540569dc04c0a23437b084b1564e15)), refs [#6362](https://github.com/googleapis/google-cloud-go/issues/6362)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** allow to use age=0 in OLM conditions ([#6204](https://github.com/googleapis/google-cloud-go/issues/6204)) ([c85704f](https://github.com/googleapis/google-cloud-go/commit/c85704f4284626ce728cb48f3b130f2ce2a0165e))
|
||||
|
||||
## [1.25.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.24.0...storage/v1.25.0) (2022-08-11)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage/internal:** Add routing annotations ([8a8ba85](https://github.com/googleapis/google-cloud-go/commit/8a8ba85311f85701c97fd7c10f1d88b738ce423f))
|
||||
* **storage:** refactor to use transport-agnostic interface ([#6465](https://github.com/googleapis/google-cloud-go/issues/6465)) ([d03c3e1](https://github.com/googleapis/google-cloud-go/commit/d03c3e15a79fe9afa1232d9c8bd4c484a9bb927e))
|
||||
|
||||
## [1.24.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.23.0...storage/v1.24.0) (2022-07-20)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **storage:** add Custom Placement Config Dual Region Support ([#6294](https://github.com/googleapis/google-cloud-go/issues/6294)) ([5a8c607](https://github.com/googleapis/google-cloud-go/commit/5a8c607e3a9a3265887e27cb13f8943f3e3fa23d))
|
||||
|
||||
## [1.23.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.1...storage/v1.23.0) (2022-06-23)
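The v1.28.0 entry above adds Autoclass support. A hedged sketch of enabling it when creating a bucket (bucket and project names are placeholders; the field names are as introduced in storage v1.28.0):

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Create a bucket with Autoclass enabled, per the v1.28.0 feature above.
	attrs := &storage.BucketAttrs{
		Autoclass: &storage.Autoclass{Enabled: true},
	}
	if err := client.Bucket("my-bucket").Create(ctx, "my-project", attrs); err != nil {
		log.Fatal(err)
	}
}
```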
|
||||
|
||||
|
||||
|
|
vendor/cloud.google.com/go/storage/README.md (generated, vendored): 4 changed lines
|
@ -2,7 +2,7 @@
|
|||
|
||||
- [About Cloud Storage](https://cloud.google.com/storage/)
|
||||
- [API documentation](https://cloud.google.com/storage/docs)
|
||||
- [Go client documentation](https://pkg.go.dev/cloud.google.com/go/storage)
|
||||
- [Go client documentation](https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest)
|
||||
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/main/storage)
|
||||
|
||||
### Example Usage
|
||||
|
@ -25,7 +25,7 @@ if err != nil {
|
|||
log.Fatal(err)
|
||||
}
|
||||
defer rc.Close()
|
||||
body, err := ioutil.ReadAll(rc)
|
||||
body, err := io.ReadAll(rc)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
|
vendor/cloud.google.com/go/storage/acl.go (generated, vendored): 102 changed lines
|
@ -20,9 +20,8 @@ import (
|
|||
"reflect"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"google.golang.org/api/googleapi"
|
||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
storagepb "google.golang.org/genproto/googleapis/storage/v2"
|
||||
)
|
||||
|
||||
// ACLRole is the level of access to grant.
|
||||
|
@ -121,111 +120,46 @@ func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
|
|||
}
|
||||
|
||||
func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.ObjectAccessControls
|
||||
var err error
|
||||
req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
|
||||
a.configureCall(ctx, req)
|
||||
err = run(ctx, func() error {
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
}, a.retry, true, setRetryHeaderHTTP(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toObjectACLRules(acls.Items), nil
|
||||
opts := makeStorageOpts(true, a.retry, a.userProject)
|
||||
return a.c.tc.ListDefaultObjectACLs(ctx, a.bucket, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
|
||||
req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
|
||||
a.configureCall(ctx, req)
|
||||
|
||||
return run(ctx, func() error {
|
||||
return req.Do()
|
||||
}, a.retry, false, setRetryHeaderHTTP(req))
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
return a.c.tc.DeleteDefaultObjectACL(ctx, a.bucket, entity, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.BucketAccessControls
|
||||
var err error
|
||||
req := a.c.raw.BucketAccessControls.List(a.bucket)
|
||||
a.configureCall(ctx, req)
|
||||
err = run(ctx, func() error {
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
}, a.retry, true, setRetryHeaderHTTP(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toBucketACLRules(acls.Items), nil
|
||||
opts := makeStorageOpts(true, a.retry, a.userProject)
|
||||
return a.c.tc.ListBucketACLs(ctx, a.bucket, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
|
||||
acl := &raw.BucketAccessControl{
|
||||
Bucket: a.bucket,
|
||||
Entity: string(entity),
|
||||
Role: string(role),
|
||||
}
|
||||
req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
|
||||
a.configureCall(ctx, req)
|
||||
return run(ctx, func() error {
|
||||
_, err := req.Do()
|
||||
return err
|
||||
}, a.retry, false, setRetryHeaderHTTP(req))
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
return a.c.tc.UpdateBucketACL(ctx, a.bucket, entity, role, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
|
||||
req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
|
||||
a.configureCall(ctx, req)
|
||||
return run(ctx, func() error {
|
||||
return req.Do()
|
||||
}, a.retry, false, setRetryHeaderHTTP(req))
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
return a.c.tc.DeleteBucketACL(ctx, a.bucket, entity, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.ObjectAccessControls
|
||||
var err error
|
||||
req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
|
||||
a.configureCall(ctx, req)
|
||||
err = run(ctx, func() error {
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
}, a.retry, true, setRetryHeaderHTTP(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toObjectACLRules(acls.Items), nil
|
||||
opts := makeStorageOpts(true, a.retry, a.userProject)
|
||||
return a.c.tc.ListObjectACLs(ctx, a.bucket, a.object, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
|
||||
type setRequest interface {
|
||||
Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
|
||||
Header() http.Header
|
||||
}
|
||||
|
||||
acl := &raw.ObjectAccessControl{
|
||||
Bucket: a.bucket,
|
||||
Entity: string(entity),
|
||||
Role: string(role),
|
||||
}
|
||||
var req setRequest
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
if isBucketDefault {
|
||||
req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl)
|
||||
} else {
|
||||
req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
|
||||
return a.c.tc.UpdateDefaultObjectACL(ctx, a.bucket, entity, role, opts...)
|
||||
}
|
||||
a.configureCall(ctx, req)
|
||||
return run(ctx, func() error {
|
||||
_, err := req.Do()
|
||||
return err
|
||||
}, a.retry, false, setRetryHeaderHTTP(req))
|
||||
return a.c.tc.UpdateObjectACL(ctx, a.bucket, a.object, entity, role, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
|
||||
req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
|
||||
a.configureCall(ctx, req)
|
||||
return run(ctx, func() error {
|
||||
return req.Do()
|
||||
}, a.retry, false, setRetryHeaderHTTP(req))
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
return a.c.tc.DeleteObjectACL(ctx, a.bucket, a.object, entity, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {
|
||||
|
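For orientation: the ACL rewrites above only change internal plumbing (each method now delegates to the transport-agnostic tc client via makeStorageOpts), while the public ACLHandle surface is unchanged. A minimal usage sketch, assuming the public cloud.google.com/go/storage package and a hypothetical bucket name:

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bkt := client.Bucket("my-bucket") // hypothetical bucket name

	// List the bucket ACL (bucketList -> tc.ListBucketACLs above).
	rules, err := bkt.ACL().List(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range rules {
		fmt.Println(r.Entity, r.Role)
	}

	// Grant read access on the default object ACL
	// (objectSet with isBucketDefault=true -> tc.UpdateDefaultObjectACL above).
	if err := bkt.DefaultObjectACL().Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
		log.Fatal(err)
	}
}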
|
663 vendor/cloud.google.com/go/storage/bucket.go generated vendored
|
@ -20,21 +20,20 @@ import (
|
|||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/compute/metadata"
|
||||
"cloud.google.com/go/internal/optional"
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"github.com/googleapis/go-type-adapters/adapters"
|
||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/api/iamcredentials/v1"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
"google.golang.org/genproto/googleapis/storage/v2"
|
||||
storagepb "google.golang.org/genproto/googleapis/storage/v2"
|
||||
dpb "google.golang.org/genproto/googleapis/type/date"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
|
@ -56,7 +55,8 @@ type BucketHandle struct {
|
|||
// The supplied name must contain only lowercase letters, numbers, dashes,
|
||||
// underscores, and dots. The full specification for valid bucket names can be
|
||||
// found at:
|
||||
// https://cloud.google.com/storage/docs/bucket-naming
|
||||
//
|
||||
// https://cloud.google.com/storage/docs/bucket-naming
|
||||
func (c *Client) Bucket(name string) *BucketHandle {
|
||||
retry := c.retry.clone()
|
||||
return &BucketHandle{
|
||||
|
@ -83,27 +83,11 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
|
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
var bkt *raw.Bucket
|
||||
if attrs != nil {
|
||||
bkt = attrs.toRawBucket()
|
||||
} else {
|
||||
bkt = &raw.Bucket{}
|
||||
o := makeStorageOpts(true, b.retry, b.userProject)
|
||||
if _, err := b.c.tc.CreateBucket(ctx, projectID, b.name, attrs, o...); err != nil {
|
||||
return err
|
||||
}
|
||||
bkt.Name = b.name
|
||||
// If there is lifecycle information but no location, explicitly set
|
||||
// the location. This is a GCS quirk/bug.
|
||||
if bkt.Location == "" && bkt.Lifecycle != nil {
|
||||
bkt.Location = "US"
|
||||
}
|
||||
req := b.c.raw.Buckets.Insert(projectID, bkt)
|
||||
setClientHeader(req.Header())
|
||||
if attrs != nil && attrs.PredefinedACL != "" {
|
||||
req.PredefinedAcl(attrs.PredefinedACL)
|
||||
}
|
||||
if attrs != nil && attrs.PredefinedDefaultObjectACL != "" {
|
||||
req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL)
|
||||
}
|
||||
return run(ctx, func() error { _, err := req.Context(ctx).Do(); return err }, b.retry, true, setRetryHeaderHTTP(req))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete deletes the Bucket.
|
||||
|
@ -111,24 +95,8 @@ func (b *BucketHandle) Delete(ctx context.Context) (err error) {
|
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
req, err := b.newDeleteCall()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return run(ctx, func() error { return req.Context(ctx).Do() }, b.retry, true, setRetryHeaderHTTP(req))
|
||||
}
|
||||
|
||||
func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) {
|
||||
req := b.c.raw.Buckets.Delete(b.name)
|
||||
setClientHeader(req.Header())
|
||||
if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b.userProject != "" {
|
||||
req.UserProject(b.userProject)
|
||||
}
|
||||
return req, nil
|
||||
o := makeStorageOpts(true, b.retry, b.userProject)
|
||||
return b.c.tc.DeleteBucket(ctx, b.name, b.conds, o...)
|
||||
}
|
||||
|
||||
// ACL returns an ACLHandle, which provides access to the bucket's access control list.
|
||||
|
@ -151,7 +119,8 @@ func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
|
|||
//
|
||||
// name must consist entirely of valid UTF-8-encoded runes. The full specification
|
||||
// for valid object names can be found at:
|
||||
// https://cloud.google.com/storage/docs/naming-objects
|
||||
//
|
||||
// https://cloud.google.com/storage/docs/naming-objects
|
||||
func (b *BucketHandle) Object(name string) *ObjectHandle {
|
||||
retry := b.retry.clone()
|
||||
return &ObjectHandle{
|
||||
|
@ -176,35 +145,8 @@ func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error
|
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
req, err := b.newGetCall()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var resp *raw.Bucket
|
||||
err = run(ctx, func() error {
|
||||
resp, err = req.Context(ctx).Do()
|
||||
return err
|
||||
}, b.retry, true, setRetryHeaderHTTP(req))
|
||||
var e *googleapi.Error
|
||||
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
|
||||
return nil, ErrBucketNotExist
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newBucket(resp)
|
||||
}
|
||||
|
||||
func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
|
||||
req := b.c.raw.Buckets.Get(b.name).Projection("full")
|
||||
setClientHeader(req.Header())
|
||||
if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b.userProject != "" {
|
||||
req.UserProject(b.userProject)
|
||||
}
|
||||
return req, nil
|
||||
o := makeStorageOpts(true, b.retry, b.userProject)
|
||||
return b.c.tc.GetBucket(ctx, b.name, b.conds, o...)
|
||||
}
|
||||
|
||||
// Update updates a bucket's attributes.
|
||||
|
@ -212,62 +154,23 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (
|
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
req, err := b.newPatchCall(&uattrs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if uattrs.PredefinedACL != "" {
|
||||
req.PredefinedAcl(uattrs.PredefinedACL)
|
||||
}
|
||||
if uattrs.PredefinedDefaultObjectACL != "" {
|
||||
req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL)
|
||||
}
|
||||
|
||||
isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0
|
||||
|
||||
var rawBucket *raw.Bucket
|
||||
call := func() error {
|
||||
rb, err := req.Context(ctx).Do()
|
||||
rawBucket = rb
|
||||
return err
|
||||
}
|
||||
|
||||
if err := run(ctx, call, b.retry, isIdempotent, setRetryHeaderHTTP(req)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newBucket(rawBucket)
|
||||
}
|
||||
|
||||
func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) {
|
||||
rb := uattrs.toRawBucket()
|
||||
req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full")
|
||||
setClientHeader(req.Header())
|
||||
if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b.userProject != "" {
|
||||
req.UserProject(b.userProject)
|
||||
}
|
||||
return req, nil
|
||||
o := makeStorageOpts(isIdempotent, b.retry, b.userProject)
|
||||
return b.c.tc.UpdateBucket(ctx, b.name, &uattrs, b.conds, o...)
|
||||
}
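The Create/Delete/Attrs/Update rewrites above drop the hand-rolled raw.Buckets calls in favor of the tc client. A sketch of the unchanged public surface these methods serve, assuming hypothetical bucket and project names:

package example

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func manageBucket(ctx context.Context, client *storage.Client) error {
	bkt := client.Bucket("my-bucket") // hypothetical name

	// Create -> tc.CreateBucket (treated as idempotent, so always retriable).
	if err := bkt.Create(ctx, "my-project", &storage.BucketAttrs{Location: "US"}); err != nil {
		return err
	}

	// Attrs -> tc.GetBucket.
	attrs, err := bkt.Attrs(ctx)
	if err != nil {
		return err
	}
	log.Printf("bucket %s, metageneration %d", attrs.Name, attrs.MetaGeneration)

	// Update -> tc.UpdateBucket; the MetagenerationMatch precondition is what
	// makes isIdempotent true in the code above.
	_, err = bkt.If(storage.BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).
		Update(ctx, storage.BucketAttrsToUpdate{VersioningEnabled: true})
	return err
}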
|
||||
|
||||
// SignedURL returns a URL for the specified object. Signed URLs allow anyone
|
||||
// access to a restricted resource for a limited time without needing a
|
||||
// Google account or signing in. For more information about signed URLs, see
|
||||
// https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication
|
||||
// access to a restricted resource for a limited time without needing a Google
|
||||
// account or signing in.
|
||||
// For more information about signed URLs, see "[Overview of access control]."
|
||||
//
|
||||
// This method only requires the Method and Expires fields in the specified
|
||||
// SignedURLOptions opts to be non-nil. If not provided, it attempts to fill the
|
||||
// GoogleAccessID and PrivateKey from the GOOGLE_APPLICATION_CREDENTIALS environment variable.
|
||||
// If you are authenticating with a custom HTTP client, Service Account based
|
||||
// auto-detection will be hindered.
|
||||
// This method requires the Method and Expires fields in the specified
|
||||
// SignedURLOptions to be non-nil. You may need to set the GoogleAccessID and
|
||||
// PrivateKey fields in some cases. Read more on the [automatic detection of credentials]
|
||||
// for this method.
|
||||
//
|
||||
// If no private key is found, it attempts to use the GoogleAccessID to sign the URL.
|
||||
// This requires the IAM Service Account Credentials API to be enabled
|
||||
// (https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview)
|
||||
// and iam.serviceAccounts.signBlob permissions on the GoogleAccessID service account.
|
||||
// If you do not want these fields set for you, you may pass them in through opts or use
|
||||
// SignedURL(bucket, name string, opts *SignedURLOptions) instead.
|
||||
// [Overview of access control]: https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication
|
||||
// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing
|
||||
func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, error) {
|
||||
if opts.GoogleAccessID != "" && (opts.SignBytes != nil || len(opts.PrivateKey) > 0) {
|
||||
return SignedURL(b.name, object, opts)
|
||||
|
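A sketch of calling the method documented above; the bucket and object names are hypothetical, and credential auto-detection follows the rules described in the comment when GoogleAccessID/PrivateKey are left unset:

package example

import (
	"time"

	"cloud.google.com/go/storage"
)

func signedGetURL(client *storage.Client) (string, error) {
	// A time-limited V4 GET URL for one object.
	return client.Bucket("my-bucket").SignedURL("my-object", &storage.SignedURLOptions{
		Scheme:  storage.SigningSchemeV4,
		Method:  "GET",
		Expires: time.Now().Add(15 * time.Minute),
	})
}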
@ -305,18 +208,11 @@ func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string,
|
|||
// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts.
|
||||
// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads.
|
||||
//
|
||||
// This method only requires the Expires field in the specified PostPolicyV4Options
|
||||
// to be non-nil. If not provided, it attempts to fill the GoogleAccessID and PrivateKey
|
||||
// from the GOOGLE_APPLICATION_CREDENTIALS environment variable.
|
||||
// If you are authenticating with a custom HTTP client, Service Account based
|
||||
// auto-detection will be hindered.
|
||||
// This method requires the Expires field in the specified PostPolicyV4Options
|
||||
// to be non-nil. You may need to set the GoogleAccessID and PrivateKey fields
|
||||
// in some cases. Read more on the [automatic detection of credentials] for this method.
|
||||
//
|
||||
// If no private key is found, it attempts to use the GoogleAccessID to sign the URL.
|
||||
// This requires the IAM Service Account Credentials API to be enabled
|
||||
// (https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview)
|
||||
// and iam.serviceAccounts.signBlob permissions on the GoogleAccessID service account.
|
||||
// If you do not want these fields set for you, you may pass them in through opts or use
|
||||
// GenerateSignedPostPolicyV4(bucket, name string, opts *PostPolicyV4Options) instead.
|
||||
// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing
|
||||
func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) {
|
||||
if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) {
|
||||
return GenerateSignedPostPolicyV4(b.name, object, opts)
|
||||
|
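Similarly, a sketch for the post-policy variant (hypothetical names); the returned policy carries the form action URL and the hidden fields an unauthenticated browser client submits:

package example

import (
	"time"

	"cloud.google.com/go/storage"
)

func uploadPolicy(client *storage.Client) (*storage.PostPolicyV4, error) {
	policy, err := client.Bucket("my-bucket").GenerateSignedPostPolicyV4("uploads/photo.jpg",
		&storage.PostPolicyV4Options{Expires: time.Now().Add(10 * time.Minute)})
	if err != nil {
		return nil, err
	}
	// policy.URL is the form action; policy.Fields are the hidden form inputs.
	return policy, nil
}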
@ -356,17 +252,27 @@ func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) {
|
|||
|
||||
if b.c.creds != nil && len(b.c.creds.JSON) > 0 {
|
||||
var sa struct {
|
||||
ClientEmail string `json:"client_email"`
|
||||
}
|
||||
err := json.Unmarshal(b.c.creds.JSON, &sa)
|
||||
if err == nil && sa.ClientEmail != "" {
|
||||
return sa.ClientEmail, nil
|
||||
} else if err != nil {
|
||||
returnErr = err
|
||||
} else {
|
||||
returnErr = errors.New("storage: empty client email in credentials")
|
||||
ClientEmail string `json:"client_email"`
|
||||
SAImpersonationURL string `json:"service_account_impersonation_url"`
|
||||
CredType string `json:"type"`
|
||||
}
|
||||
|
||||
err := json.Unmarshal(b.c.creds.JSON, &sa)
|
||||
if err != nil {
|
||||
returnErr = err
|
||||
} else if sa.CredType == "impersonated_service_account" {
|
||||
start, end := strings.LastIndex(sa.SAImpersonationURL, "/"), strings.LastIndex(sa.SAImpersonationURL, ":")
|
||||
|
||||
if end <= start {
|
||||
returnErr = errors.New("error parsing impersonated service account credentials")
|
||||
} else {
|
||||
return sa.SAImpersonationURL[start+1 : end], nil
|
||||
}
|
||||
} else if sa.CredType == "service_account" && sa.ClientEmail != "" {
|
||||
return sa.ClientEmail, nil
|
||||
} else {
|
||||
returnErr = errors.New("unable to parse credentials; only service_account and impersonated_service_account credentials are supported")
|
||||
}
|
||||
}
|
||||
|
||||
// Don't error out if we can't unmarshal, fallback to GCE check.
|
||||
|
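The impersonated_service_account branch above pulls the target service account out of its impersonation URL by slicing between the last "/" and the last ":". A standalone sketch of that parsing, with a hypothetical URL value:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical service_account_impersonation_url value.
	u := "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/target-sa@my-project.iam.gserviceaccount.com:generateAccessToken"
	start, end := strings.LastIndex(u, "/"), strings.LastIndex(u, ":")
	if end <= start {
		fmt.Println("error parsing impersonated service account credentials")
		return
	}
	// Prints: target-sa@my-project.iam.gserviceaccount.com
	fmt.Println(u[start+1 : end])
}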
@ -377,11 +283,11 @@ func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) {
|
|||
} else if err != nil {
|
||||
returnErr = err
|
||||
} else {
|
||||
returnErr = errors.New("got empty email from GCE metadata service")
|
||||
returnErr = errors.New("empty email from GCE metadata service")
|
||||
}
|
||||
|
||||
}
|
||||
return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %v", returnErr)
|
||||
return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %w. Please provide the GoogleAccessID or use a supported means for autodetecting it (see https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4])", returnErr)
|
||||
}
|
||||
|
||||
func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte, error) {
|
||||
|
@ -392,18 +298,18 @@ func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte,
|
|||
// circumventing the cost of recreating the auth/transport layer
|
||||
svc, err := iamcredentials.NewService(ctx, option.WithHTTPClient(b.c.hc))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to create iamcredentials client: %v", err)
|
||||
return nil, fmt.Errorf("unable to create iamcredentials client: %w", err)
|
||||
}
|
||||
|
||||
resp, err := svc.Projects.ServiceAccounts.SignBlob(fmt.Sprintf("projects/-/serviceAccounts/%s", email), &iamcredentials.SignBlobRequest{
|
||||
Payload: base64.StdEncoding.EncodeToString(in),
|
||||
}).Do()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to sign bytes: %v", err)
|
||||
return nil, fmt.Errorf("unable to sign bytes: %w", err)
|
||||
}
|
||||
out, err := base64.StdEncoding.DecodeString(resp.SignedBlob)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to base64 decode response: %v", err)
|
||||
return nil, fmt.Errorf("unable to base64 decode response: %w", err)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
@ -461,8 +367,13 @@ type BucketAttrs struct {
|
|||
PredefinedDefaultObjectACL string
|
||||
|
||||
// Location is the location of the bucket. It defaults to "US".
|
||||
// If specifying a dual-region, CustomPlacementConfig should be set in conjunction.
|
||||
Location string
|
||||
|
||||
// The bucket's custom placement configuration that holds a list of
|
||||
// regional locations for custom dual regions.
|
||||
CustomPlacementConfig *CustomPlacementConfig
|
||||
|
||||
// MetaGeneration is the metadata generation of the bucket.
|
||||
// This field is read-only.
|
||||
MetaGeneration int64
|
||||
|
@ -533,6 +444,11 @@ type BucketAttrs struct {
|
|||
// See https://cloud.google.com/storage/docs/managing-turbo-replication for
|
||||
// more information.
|
||||
RPO RPO
|
||||
|
||||
// Autoclass holds the bucket's autoclass configuration. If enabled,
|
||||
// allows for the automatic selection of the best storage class
|
||||
// based on object access patterns.
|
||||
Autoclass *Autoclass
|
||||
}
|
||||
|
||||
// BucketPolicyOnly is an alias for UniformBucketLevelAccess.
|
||||
|
@ -698,7 +614,12 @@ const (
|
|||
//
|
||||
// All configured conditions must be met for the associated action to be taken.
|
||||
type LifecycleCondition struct {
|
||||
// AllObjects is used to select all objects in a bucket by
|
||||
// setting AgeInDays to 0.
|
||||
AllObjects bool
|
||||
|
||||
// AgeInDays is the age of the object in days.
|
||||
// If you want to set AgeInDays to `0` use AllObjects set to `true`.
|
||||
AgeInDays int64
|
||||
|
||||
// CreatedBefore is the time the object was created.
|
||||
|
@ -716,10 +637,12 @@ type LifecycleCondition struct {
|
|||
|
||||
// DaysSinceCustomTime is the days elapsed since the CustomTime date of the
|
||||
// object. This condition can only be satisfied if CustomTime has been set.
|
||||
// Note: Using `0` as the value will be ignored by the library and not sent to the API.
|
||||
DaysSinceCustomTime int64
|
||||
|
||||
// DaysSinceNoncurrentTime is the days elapsed since the noncurrent timestamp
|
||||
// of the object. This condition is relevant only for versioned objects.
|
||||
// Note: Using `0` as the value will be ignored by the library and not sent to the API.
|
||||
DaysSinceNoncurrentTime int64
|
||||
|
||||
// Liveness specifies the object's liveness. Relevant only for versioned objects
|
||||
|
@ -751,6 +674,7 @@ type LifecycleCondition struct {
|
|||
// If the value is N, this condition is satisfied when there are at least N
|
||||
// versions (including the live version) newer than this version of the
|
||||
// object.
|
||||
// Note: Using `0` as the value will be ignored by the library and not sent to the API.
|
||||
NumNewerVersions int64
|
||||
}
|
||||
|
||||
|
@ -782,6 +706,29 @@ type BucketWebsite struct {
|
|||
NotFoundPage string
|
||||
}
|
||||
|
||||
// CustomPlacementConfig holds the bucket's custom placement
|
||||
// configuration for Custom Dual Regions. See
|
||||
// https://cloud.google.com/storage/docs/locations#location-dr for more information.
|
||||
type CustomPlacementConfig struct {
|
||||
// The list of regional locations in which data is placed.
|
||||
// Custom Dual Regions require exactly 2 regional locations.
|
||||
DataLocations []string
|
||||
}
|
||||
|
||||
// Autoclass holds the bucket's autoclass configuration. If enabled,
|
||||
// allows for the automatic selection of the best storage class
|
||||
// based on object access patterns. See
|
||||
// https://cloud.google.com/storage/docs/using-autoclass for more information.
|
||||
type Autoclass struct {
|
||||
// Enabled specifies whether the autoclass feature is enabled
|
||||
// on the bucket.
|
||||
Enabled bool
|
||||
// ToggleTime is the time from which Autoclass was last toggled.
|
||||
// If Autoclass is enabled when the bucket is created, the ToggleTime
|
||||
// is set to the bucket creation time. This field is read-only.
|
||||
ToggleTime time.Time
|
||||
}
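Both new types surface as BucketAttrs fields added earlier in this diff. A sketch of creating a dual-region bucket with Autoclass enabled, assuming hypothetical names and this version of the package:

package example

import (
	"context"

	"cloud.google.com/go/storage"
)

func createDualRegionBucket(ctx context.Context, client *storage.Client) error {
	attrs := &storage.BucketAttrs{
		// For a dual-region bucket, Location and CustomPlacementConfig are set together.
		Location: "US",
		CustomPlacementConfig: &storage.CustomPlacementConfig{
			DataLocations: []string{"US-EAST1", "US-WEST1"},
		},
		// Autoclass lets the service pick the storage class from access patterns.
		Autoclass: &storage.Autoclass{Enabled: true},
	}
	return client.Bucket("my-dual-region-bucket").Create(ctx, "my-project", attrs)
}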
|
||||
|
||||
func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
|
||||
if b == nil {
|
||||
return nil, nil
|
||||
|
@ -815,6 +762,8 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
|
|||
LocationType: b.LocationType,
|
||||
ProjectNumber: b.ProjectNumber,
|
||||
RPO: toRPO(b),
|
||||
CustomPlacementConfig: customPlacementFromRaw(b.CustomPlacementConfig),
|
||||
Autoclass: toAutoclassFromRaw(b.Autoclass),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -845,6 +794,9 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
|
|||
PublicAccessPrevention: toPublicAccessPreventionFromProto(b.GetIamConfig()),
|
||||
LocationType: b.GetLocationType(),
|
||||
RPO: toRPOFromProto(b),
|
||||
CustomPlacementConfig: customPlacementFromProto(b.GetCustomPlacementConfig()),
|
||||
ProjectNumber: parseProjectNumber(b.GetProject()), // this can return 0 the project resource name is ID based
|
||||
Autoclass: toAutoclassFromProto(b.GetAutoclass()),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -882,22 +834,24 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
|
|||
}
|
||||
}
|
||||
return &raw.Bucket{
|
||||
Name: b.Name,
|
||||
Location: b.Location,
|
||||
StorageClass: b.StorageClass,
|
||||
Acl: toRawBucketACL(b.ACL),
|
||||
DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL),
|
||||
Versioning: v,
|
||||
Labels: labels,
|
||||
Billing: bb,
|
||||
Lifecycle: toRawLifecycle(b.Lifecycle),
|
||||
RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(),
|
||||
Cors: toRawCORS(b.CORS),
|
||||
Encryption: b.Encryption.toRawBucketEncryption(),
|
||||
Logging: b.Logging.toRawBucketLogging(),
|
||||
Website: b.Website.toRawBucketWebsite(),
|
||||
IamConfiguration: bktIAM,
|
||||
Rpo: b.RPO.String(),
|
||||
Name: b.Name,
|
||||
Location: b.Location,
|
||||
StorageClass: b.StorageClass,
|
||||
Acl: toRawBucketACL(b.ACL),
|
||||
DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL),
|
||||
Versioning: v,
|
||||
Labels: labels,
|
||||
Billing: bb,
|
||||
Lifecycle: toRawLifecycle(b.Lifecycle),
|
||||
RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(),
|
||||
Cors: toRawCORS(b.CORS),
|
||||
Encryption: b.Encryption.toRawBucketEncryption(),
|
||||
Logging: b.Logging.toRawBucketLogging(),
|
||||
Website: b.Website.toRawBucketWebsite(),
|
||||
IamConfiguration: bktIAM,
|
||||
Rpo: b.RPO.String(),
|
||||
CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(),
|
||||
Autoclass: b.Autoclass.toRawAutoclass(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -924,7 +878,7 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket {
|
|||
}
|
||||
var bb *storagepb.Bucket_Billing
|
||||
if b.RequesterPays {
|
||||
bb = &storage.Bucket_Billing{RequesterPays: true}
|
||||
bb = &storagepb.Bucket_Billing{RequesterPays: true}
|
||||
}
|
||||
var bktIAM *storagepb.Bucket_IamConfig
|
||||
if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown {
|
||||
|
@ -940,22 +894,24 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket {
|
|||
}
|
||||
|
||||
return &storagepb.Bucket{
|
||||
Name: b.Name,
|
||||
Location: b.Location,
|
||||
StorageClass: b.StorageClass,
|
||||
Acl: toProtoBucketACL(b.ACL),
|
||||
DefaultObjectAcl: toProtoObjectACL(b.DefaultObjectACL),
|
||||
Versioning: v,
|
||||
Labels: labels,
|
||||
Billing: bb,
|
||||
Lifecycle: toProtoLifecycle(b.Lifecycle),
|
||||
RetentionPolicy: b.RetentionPolicy.toProtoRetentionPolicy(),
|
||||
Cors: toProtoCORS(b.CORS),
|
||||
Encryption: b.Encryption.toProtoBucketEncryption(),
|
||||
Logging: b.Logging.toProtoBucketLogging(),
|
||||
Website: b.Website.toProtoBucketWebsite(),
|
||||
IamConfig: bktIAM,
|
||||
Rpo: b.RPO.String(),
|
||||
Name: b.Name,
|
||||
Location: b.Location,
|
||||
StorageClass: b.StorageClass,
|
||||
Acl: toProtoBucketACL(b.ACL),
|
||||
DefaultObjectAcl: toProtoObjectACL(b.DefaultObjectACL),
|
||||
Versioning: v,
|
||||
Labels: labels,
|
||||
Billing: bb,
|
||||
Lifecycle: toProtoLifecycle(b.Lifecycle),
|
||||
RetentionPolicy: b.RetentionPolicy.toProtoRetentionPolicy(),
|
||||
Cors: toProtoCORS(b.CORS),
|
||||
Encryption: b.Encryption.toProtoBucketEncryption(),
|
||||
Logging: b.Logging.toProtoBucketLogging(),
|
||||
Website: b.Website.toProtoBucketWebsite(),
|
||||
IamConfig: bktIAM,
|
||||
Rpo: b.RPO.String(),
|
||||
CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(),
|
||||
Autoclass: b.Autoclass.toProtoAutoclass(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -972,25 +928,32 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket {
|
|||
}
|
||||
var bb *storagepb.Bucket_Billing
|
||||
if ua.RequesterPays != nil {
|
||||
bb = &storage.Bucket_Billing{RequesterPays: optional.ToBool(ua.RequesterPays)}
|
||||
bb = &storagepb.Bucket_Billing{RequesterPays: optional.ToBool(ua.RequesterPays)}
|
||||
}
|
||||
|
||||
var bktIAM *storagepb.Bucket_IamConfig
|
||||
var ublaEnabled bool
|
||||
var bktPolicyOnlyEnabled bool
|
||||
if ua.UniformBucketLevelAccess != nil {
|
||||
ublaEnabled = optional.ToBool(ua.UniformBucketLevelAccess.Enabled)
|
||||
}
|
||||
if ua.BucketPolicyOnly != nil {
|
||||
bktPolicyOnlyEnabled = optional.ToBool(ua.BucketPolicyOnly.Enabled)
|
||||
}
|
||||
if ublaEnabled || bktPolicyOnlyEnabled {
|
||||
bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{
|
||||
Enabled: true,
|
||||
if ua.UniformBucketLevelAccess != nil || ua.BucketPolicyOnly != nil || ua.PublicAccessPrevention != PublicAccessPreventionUnknown {
|
||||
bktIAM = &storagepb.Bucket_IamConfig{}
|
||||
|
||||
if ua.BucketPolicyOnly != nil {
|
||||
bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{
|
||||
Enabled: optional.ToBool(ua.BucketPolicyOnly.Enabled),
|
||||
}
|
||||
}
|
||||
|
||||
if ua.UniformBucketLevelAccess != nil {
|
||||
// UniformBucketLevelAccess takes precedence over BucketPolicyOnly,
|
||||
// so Enabled will be overriden here if both are set
|
||||
bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{
|
||||
Enabled: optional.ToBool(ua.UniformBucketLevelAccess.Enabled),
|
||||
}
|
||||
}
|
||||
|
||||
if ua.PublicAccessPrevention != PublicAccessPreventionUnknown {
|
||||
bktIAM.PublicAccessPrevention = ua.PublicAccessPrevention.String()
|
||||
}
|
||||
}
|
||||
if ua.PublicAccessPrevention != PublicAccessPreventionUnknown {
|
||||
bktIAM.PublicAccessPrevention = ua.PublicAccessPrevention.String()
|
||||
}
|
||||
|
||||
var defaultHold bool
|
||||
if ua.DefaultEventBasedHold != nil {
|
||||
defaultHold = optional.ToBool(ua.DefaultEventBasedHold)
|
||||
|
@ -1031,6 +994,7 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket {
|
|||
Website: ua.Website.toProtoBucketWebsite(),
|
||||
IamConfig: bktIAM,
|
||||
Rpo: ua.RPO.String(),
|
||||
Autoclass: ua.Autoclass.toProtoAutoclass(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1146,6 +1110,10 @@ type BucketAttrsToUpdate struct {
|
|||
// more information.
|
||||
RPO RPO
|
||||
|
||||
// If set, updates the autoclass configuration of the bucket.
|
||||
// See https://cloud.google.com/storage/docs/using-autoclass for more information.
|
||||
Autoclass *Autoclass
|
||||
|
||||
// acl is the list of access control rules on the bucket.
|
||||
// It is unexported and only used internally by the gRPC client.
|
||||
// Library users should use ACLHandle methods directly.
|
||||
|
@ -1259,6 +1227,12 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
|
|||
rb.Website = ua.Website.toRawBucketWebsite()
|
||||
}
|
||||
}
|
||||
if ua.Autoclass != nil {
|
||||
rb.Autoclass = &raw.BucketAutoclass{
|
||||
Enabled: ua.Autoclass.Enabled,
|
||||
ForceSendFields: []string{"Enabled"},
|
||||
}
|
||||
}
|
||||
if ua.PredefinedACL != "" {
|
||||
// Clear ACL or the call will fail.
|
||||
rb.Acl = nil
|
||||
|
@ -1347,15 +1321,8 @@ func (b *BucketHandle) UserProject(projectID string) *BucketHandle {
|
|||
// most customers. It might be changed in backwards-incompatible ways and is not
|
||||
// subject to any SLA or deprecation policy.
|
||||
func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error {
|
||||
var metageneration int64
|
||||
if b.conds != nil {
|
||||
metageneration = b.conds.MetagenerationMatch
|
||||
}
|
||||
req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration)
|
||||
return run(ctx, func() error {
|
||||
_, err := req.Context(ctx).Do()
|
||||
return err
|
||||
}, b.retry, true, setRetryHeaderHTTP(req))
|
||||
o := makeStorageOpts(true, b.retry, b.userProject)
|
||||
return b.c.tc.LockBucketRetentionPolicy(ctx, b.name, b.conds, o...)
|
||||
}
|
||||
|
||||
// applyBucketConds modifies the provided call using the conditions in conds.
|
||||
|
@ -1420,8 +1387,14 @@ func (rp *RetentionPolicy) toProtoRetentionPolicy() *storagepb.Bucket_RetentionP
|
|||
if rp == nil {
|
||||
return nil
|
||||
}
|
||||
// RetentionPeriod must be greater than 0, so if it is 0, the user left it
|
||||
// unset, and so we should not send it in the request i.e. nil is sent.
|
||||
var period *int64
|
||||
if rp.RetentionPeriod != 0 {
|
||||
period = proto.Int64(int64(rp.RetentionPeriod / time.Second))
|
||||
}
|
||||
return &storagepb.Bucket_RetentionPolicy{
|
||||
RetentionPeriod: int64(rp.RetentionPeriod / time.Second),
|
||||
RetentionPeriod: period,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1441,7 +1414,7 @@ func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error)
|
|||
}
|
||||
|
||||
func toRetentionPolicyFromProto(rp *storagepb.Bucket_RetentionPolicy) *RetentionPolicy {
|
||||
if rp == nil {
|
||||
if rp == nil || rp.GetEffectiveTime().AsTime().Unix() == 0 {
|
||||
return nil
|
||||
}
|
||||
return &RetentionPolicy{
|
||||
|
@ -1503,19 +1476,6 @@ func toCORSFromProto(rc []*storagepb.Bucket_Cors) []CORS {
|
|||
return out
|
||||
}
|
||||
|
||||
// Used to handle breaking change in Autogen Storage client OLM Age field
|
||||
// from int64 to *int64 gracefully in the manual client
|
||||
// TODO(#6240): Method should be removed once breaking change is made and introduced to this client
|
||||
func setAgeCondition(age int64, ageField interface{}) {
|
||||
c := reflect.ValueOf(ageField).Elem()
|
||||
switch c.Kind() {
|
||||
case reflect.Int64:
|
||||
c.SetInt(age)
|
||||
case reflect.Ptr:
|
||||
c.Set(reflect.ValueOf(&age))
|
||||
}
|
||||
}
|
||||
|
||||
func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
|
||||
var rl raw.BucketLifecycle
|
||||
if len(l.Rules) == 0 {
|
||||
|
@ -1537,7 +1497,15 @@ func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
|
|||
},
|
||||
}
|
||||
|
||||
setAgeCondition(r.Condition.AgeInDays, &rr.Condition.Age)
|
||||
// AllObjects takes precedent when both AllObjects and AgeInDays are set
|
||||
// Rationale: If you've opted into using AllObjects, it makes sense that you
|
||||
// understand the implications of how this option works with AgeInDays.
|
||||
if r.Condition.AllObjects {
|
||||
rr.Condition.Age = googleapi.Int64(0)
|
||||
rr.Condition.ForceSendFields = []string{"Age"}
|
||||
} else if r.Condition.AgeInDays > 0 {
|
||||
rr.Condition.Age = googleapi.Int64(r.Condition.AgeInDays)
|
||||
}
|
||||
|
||||
switch r.Condition.Liveness {
|
||||
case LiveAndArchived:
|
||||
|
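The branch above means a rule written with AllObjects sends an explicit Age of 0 to the API (via ForceSendFields), while AgeInDays > 0 is sent as-is and a literal 0 is dropped. A sketch of a rule that re-classes every object, with hypothetical names:

package example

import (
	"context"

	"cloud.google.com/go/storage"
)

func archiveEverything(ctx context.Context, client *storage.Client) error {
	update := storage.BucketAttrsToUpdate{
		Lifecycle: &storage.Lifecycle{Rules: []storage.LifecycleRule{{
			Action: storage.LifecycleAction{Type: storage.SetStorageClassAction, StorageClass: "ARCHIVE"},
			// AllObjects is the supported way to express "age >= 0 days";
			// setting AgeInDays to 0 directly would be dropped by toRawLifecycle.
			Condition: storage.LifecycleCondition{AllObjects: true},
		}}},
	}
	_, err := client.Bucket("my-bucket").Update(ctx, update)
	return err
}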
@ -1586,6 +1554,11 @@ func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle {
|
|||
},
|
||||
}
|
||||
|
||||
// TODO(#6205): This may not be needed for gRPC
|
||||
if r.Condition.AllObjects {
|
||||
rr.Condition.AgeDays = proto.Int32(0)
|
||||
}
|
||||
|
||||
switch r.Condition.Liveness {
|
||||
case LiveAndArchived:
|
||||
rr.Condition.IsLive = nil
|
||||
|
@ -1596,34 +1569,19 @@ func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle {
|
|||
}
|
||||
|
||||
if !r.Condition.CreatedBefore.IsZero() {
|
||||
rr.Condition.CreatedBefore = adapters.TimeToProtoDate(r.Condition.CreatedBefore)
|
||||
rr.Condition.CreatedBefore = timeToProtoDate(r.Condition.CreatedBefore)
|
||||
}
|
||||
if !r.Condition.CustomTimeBefore.IsZero() {
|
||||
rr.Condition.CustomTimeBefore = adapters.TimeToProtoDate(r.Condition.CustomTimeBefore)
|
||||
rr.Condition.CustomTimeBefore = timeToProtoDate(r.Condition.CustomTimeBefore)
|
||||
}
|
||||
if !r.Condition.NoncurrentTimeBefore.IsZero() {
|
||||
rr.Condition.NoncurrentTimeBefore = adapters.TimeToProtoDate(r.Condition.NoncurrentTimeBefore)
|
||||
rr.Condition.NoncurrentTimeBefore = timeToProtoDate(r.Condition.NoncurrentTimeBefore)
|
||||
}
|
||||
rl.Rule = append(rl.Rule, rr)
|
||||
}
|
||||
return &rl
|
||||
}
|
||||
|
||||
// Used to handle breaking change in Autogen Storage client OLM Age field
|
||||
// from int64 to *int64 gracefully in the manual client
|
||||
// TODO(#6240): Method should be removed once breaking change is made and introduced to this client
|
||||
func getAgeCondition(ageField interface{}) int64 {
|
||||
v := reflect.ValueOf(ageField)
|
||||
if v.Kind() == reflect.Int64 {
|
||||
return v.Interface().(int64)
|
||||
} else if v.Kind() == reflect.Ptr {
|
||||
if val, ok := v.Interface().(*int64); ok {
|
||||
return *val
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
|
||||
var l Lifecycle
|
||||
if rl == nil {
|
||||
|
@ -1644,7 +1602,12 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
|
|||
NumNewerVersions: rr.Condition.NumNewerVersions,
|
||||
},
|
||||
}
|
||||
r.Condition.AgeInDays = getAgeCondition(rr.Condition.Age)
|
||||
if rr.Condition.Age != nil {
|
||||
r.Condition.AgeInDays = *rr.Condition.Age
|
||||
if *rr.Condition.Age == 0 {
|
||||
r.Condition.AllObjects = true
|
||||
}
|
||||
}
|
||||
|
||||
if rr.Condition.IsLive == nil {
|
||||
r.Condition.Liveness = LiveAndArchived
|
||||
|
@ -1690,6 +1653,11 @@ func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle {
|
|||
},
|
||||
}
|
||||
|
||||
// TODO(#6205): This may not be needed for gRPC
|
||||
if rr.GetCondition().GetAgeDays() == 0 {
|
||||
r.Condition.AllObjects = true
|
||||
}
|
||||
|
||||
if rr.GetCondition().IsLive == nil {
|
||||
r.Condition.Liveness = LiveAndArchived
|
||||
} else if rr.GetCondition().GetIsLive() {
|
||||
|
@ -1699,13 +1667,13 @@ func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle {
|
|||
}
|
||||
|
||||
if rr.GetCondition().GetCreatedBefore() != nil {
|
||||
r.Condition.CreatedBefore = adapters.ProtoDateToUTCTime(rr.GetCondition().GetCreatedBefore())
|
||||
r.Condition.CreatedBefore = protoDateToUTCTime(rr.GetCondition().GetCreatedBefore())
|
||||
}
|
||||
if rr.GetCondition().GetCustomTimeBefore() != nil {
|
||||
r.Condition.CustomTimeBefore = adapters.ProtoDateToUTCTime(rr.GetCondition().GetCustomTimeBefore())
|
||||
r.Condition.CustomTimeBefore = protoDateToUTCTime(rr.GetCondition().GetCustomTimeBefore())
|
||||
}
|
||||
if rr.GetCondition().GetNoncurrentTimeBefore() != nil {
|
||||
r.Condition.NoncurrentTimeBefore = adapters.ProtoDateToUTCTime(rr.GetCondition().GetNoncurrentTimeBefore())
|
||||
r.Condition.NoncurrentTimeBefore = protoDateToUTCTime(rr.GetCondition().GetNoncurrentTimeBefore())
|
||||
}
|
||||
l.Rules = append(l.Rules, r)
|
||||
}
|
||||
|
@ -1933,24 +1901,93 @@ func toRPOFromProto(b *storagepb.Bucket) RPO {
|
|||
}
|
||||
}
|
||||
|
||||
func customPlacementFromRaw(c *raw.BucketCustomPlacementConfig) *CustomPlacementConfig {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
return &CustomPlacementConfig{DataLocations: c.DataLocations}
|
||||
}
|
||||
|
||||
func (c *CustomPlacementConfig) toRawCustomPlacement() *raw.BucketCustomPlacementConfig {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
return &raw.BucketCustomPlacementConfig{
|
||||
DataLocations: c.DataLocations,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *CustomPlacementConfig) toProtoCustomPlacement() *storagepb.Bucket_CustomPlacementConfig {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
return &storagepb.Bucket_CustomPlacementConfig{
|
||||
DataLocations: c.DataLocations,
|
||||
}
|
||||
}
|
||||
|
||||
func customPlacementFromProto(c *storagepb.Bucket_CustomPlacementConfig) *CustomPlacementConfig {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
return &CustomPlacementConfig{DataLocations: c.GetDataLocations()}
|
||||
}
|
||||
|
||||
func (a *Autoclass) toRawAutoclass() *raw.BucketAutoclass {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
// Excluding read only field ToggleTime.
|
||||
return &raw.BucketAutoclass{
|
||||
Enabled: a.Enabled,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Autoclass) toProtoAutoclass() *storagepb.Bucket_Autoclass {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
// Excluding read only field ToggleTime.
|
||||
return &storagepb.Bucket_Autoclass{
|
||||
Enabled: a.Enabled,
|
||||
}
|
||||
}
|
||||
|
||||
func toAutoclassFromRaw(a *raw.BucketAutoclass) *Autoclass {
|
||||
if a == nil || a.ToggleTime == "" {
|
||||
return nil
|
||||
}
|
||||
// Return Autoclass.ToggleTime only if parsed with a valid value.
|
||||
t, err := time.Parse(time.RFC3339, a.ToggleTime)
|
||||
if err != nil {
|
||||
return &Autoclass{
|
||||
Enabled: a.Enabled,
|
||||
}
|
||||
}
|
||||
return &Autoclass{
|
||||
Enabled: a.Enabled,
|
||||
ToggleTime: t,
|
||||
}
|
||||
}
|
||||
|
||||
func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass {
|
||||
if a == nil || a.GetToggleTime().AsTime().Unix() == 0 {
|
||||
return nil
|
||||
}
|
||||
return &Autoclass{
|
||||
Enabled: a.GetEnabled(),
|
||||
ToggleTime: a.GetToggleTime().AsTime(),
|
||||
}
|
||||
}
|
||||
|
||||
// Objects returns an iterator over the objects in the bucket that match the
|
||||
// Query q. If q is nil, no filtering is done. Objects will be iterated over
|
||||
// lexicographically by name.
|
||||
//
|
||||
// Note: The returned iterator is not safe for concurrent operations without explicit synchronization.
|
||||
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
|
||||
it := &ObjectIterator{
|
||||
ctx: ctx,
|
||||
bucket: b,
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
it.fetch,
|
||||
func() int { return len(it.items) },
|
||||
func() interface{} { b := it.items; it.items = nil; return b })
|
||||
if q != nil {
|
||||
it.query = *q
|
||||
}
|
||||
return it
|
||||
o := makeStorageOpts(true, b.retry, b.userProject)
|
||||
return b.c.tc.ListObjects(ctx, b.name, q, o...)
|
||||
}
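Objects now returns the iterator built by the transport-specific tc.ListObjects, but iteration itself is unchanged for callers. A usage sketch with hypothetical names:

package example

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func listLogs(ctx context.Context, client *storage.Client) error {
	it := client.Bucket("my-bucket").Objects(ctx, &storage.Query{Prefix: "logs/"})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return err
		}
		fmt.Println(attrs.Name)
	}
	return nil
}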
|
||||
|
||||
// Retryer returns a bucket handle that is configured with custom retry
|
||||
|
@ -1985,7 +2022,6 @@ func (b *BucketHandle) Retryer(opts ...RetryOption) *BucketHandle {
|
|||
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
|
||||
type ObjectIterator struct {
|
||||
ctx context.Context
|
||||
bucket *BucketHandle
|
||||
query Query
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
@ -2022,52 +2058,6 @@ func (it *ObjectIterator) Next() (*ObjectAttrs, error) {
|
|||
return item, nil
|
||||
}
|
||||
|
||||
func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) {
|
||||
req := it.bucket.c.raw.Objects.List(it.bucket.name)
|
||||
setClientHeader(req.Header())
|
||||
projection := it.query.Projection
|
||||
if projection == ProjectionDefault {
|
||||
projection = ProjectionFull
|
||||
}
|
||||
req.Projection(projection.String())
|
||||
req.Delimiter(it.query.Delimiter)
|
||||
req.Prefix(it.query.Prefix)
|
||||
req.StartOffset(it.query.StartOffset)
|
||||
req.EndOffset(it.query.EndOffset)
|
||||
req.Versions(it.query.Versions)
|
||||
req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter)
|
||||
if len(it.query.fieldSelection) > 0 {
|
||||
req.Fields("nextPageToken", googleapi.Field(it.query.fieldSelection))
|
||||
}
|
||||
req.PageToken(pageToken)
|
||||
if it.bucket.userProject != "" {
|
||||
req.UserProject(it.bucket.userProject)
|
||||
}
|
||||
if pageSize > 0 {
|
||||
req.MaxResults(int64(pageSize))
|
||||
}
|
||||
var resp *raw.Objects
|
||||
var err error
|
||||
err = run(it.ctx, func() error {
|
||||
resp, err = req.Context(it.ctx).Do()
|
||||
return err
|
||||
}, it.bucket.retry, true, setRetryHeaderHTTP(req))
|
||||
if err != nil {
|
||||
var e *googleapi.Error
|
||||
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
|
||||
err = ErrBucketNotExist
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
for _, item := range resp.Items {
|
||||
it.items = append(it.items, newObject(item))
|
||||
}
|
||||
for _, prefix := range resp.Prefixes {
|
||||
it.items = append(it.items, &ObjectAttrs{Prefix: prefix})
|
||||
}
|
||||
return resp.NextPageToken, nil
|
||||
}
|
||||
|
||||
// Buckets returns an iterator over the buckets in the project. You may
|
||||
// optionally set the iterator's Prefix field to restrict the list to buckets
|
||||
// whose names begin with the prefix. By default, all buckets in the project
|
||||
|
@ -2075,17 +2065,8 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error)
|
|||
//
|
||||
// Note: The returned iterator is not safe for concurrent operations without explicit synchronization.
|
||||
func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator {
|
||||
it := &BucketIterator{
|
||||
ctx: ctx,
|
||||
client: c,
|
||||
projectID: projectID,
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
it.fetch,
|
||||
func() int { return len(it.buckets) },
|
||||
func() interface{} { b := it.buckets; it.buckets = nil; return b })
|
||||
|
||||
return it
|
||||
o := makeStorageOpts(true, c.retry, "")
|
||||
return c.tc.ListBuckets(ctx, projectID, o...)
|
||||
}
|
||||
|
||||
// A BucketIterator is an iterator over BucketAttrs.
|
||||
|
@ -2096,7 +2077,6 @@ type BucketIterator struct {
|
|||
Prefix string
|
||||
|
||||
ctx context.Context
|
||||
client *Client
|
||||
projectID string
|
||||
buckets []*BucketAttrs
|
||||
pageInfo *iterator.PageInfo
|
||||
|
@ -2122,36 +2102,6 @@ func (it *BucketIterator) Next() (*BucketAttrs, error) {
|
|||
// Note: This method is not safe for concurrent operations without explicit synchronization.
|
||||
func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|
||||
|
||||
// TODO: When the transport-agnostic client interface is integrated into the Veneer,
|
||||
// this method should be removed, and the iterator should be initialized by the
|
||||
// transport-specific client implementations.
|
||||
func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, err error) {
|
||||
req := it.client.raw.Buckets.List(it.projectID)
|
||||
setClientHeader(req.Header())
|
||||
req.Projection("full")
|
||||
req.Prefix(it.Prefix)
|
||||
req.PageToken(pageToken)
|
||||
if pageSize > 0 {
|
||||
req.MaxResults(int64(pageSize))
|
||||
}
|
||||
var resp *raw.Buckets
|
||||
err = run(it.ctx, func() error {
|
||||
resp, err = req.Context(it.ctx).Do()
|
||||
return err
|
||||
}, it.client.retry, true, setRetryHeaderHTTP(req))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, item := range resp.Items {
|
||||
b, err := newBucket(item)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.buckets = append(it.buckets, b)
|
||||
}
|
||||
return resp.NextPageToken, nil
|
||||
}
|
||||
|
||||
// RPO (Recovery Point Objective) configures the turbo replication feature. See
|
||||
// https://cloud.google.com/storage/docs/managing-turbo-replication for more information.
|
||||
type RPO int
|
||||
|
@ -2187,3 +2137,28 @@ func (rpo RPO) String() string {
|
|||
return rpoUnknown
|
||||
}
|
||||
}
|
||||
|
||||
// protoDateToUTCTime returns a new Time based on the google.type.Date, in UTC.
|
||||
//
|
||||
// Hours, minutes, seconds, and nanoseconds are set to 0.
|
||||
func protoDateToUTCTime(d *dpb.Date) time.Time {
|
||||
return protoDateToTime(d, time.UTC)
|
||||
}
|
||||
|
||||
// protoDateToTime returns a new Time based on the google.type.Date and provided
|
||||
// *time.Location.
|
||||
//
|
||||
// Hours, minutes, seconds, and nanoseconds are set to 0.
|
||||
func protoDateToTime(d *dpb.Date, l *time.Location) time.Time {
|
||||
return time.Date(int(d.GetYear()), time.Month(d.GetMonth()), int(d.GetDay()), 0, 0, 0, 0, l)
|
||||
}
|
||||
|
||||
// timeToProtoDate returns a new google.type.Date based on the provided time.Time.
|
||||
// The location is ignored, as is anything more precise than the day.
|
||||
func timeToProtoDate(t time.Time) *dpb.Date {
|
||||
return &dpb.Date{
|
||||
Year: int32(t.Year()),
|
||||
Month: int32(t.Month()),
|
||||
Day: int32(t.Day()),
|
||||
}
|
||||
}
|
||||
|
|
85 vendor/cloud.google.com/go/storage/client.go generated vendored
|
@ -44,7 +44,7 @@ type storageClient interface {
|
|||
// Top-level methods.
|
||||
|
||||
GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error)
|
||||
CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error)
|
||||
CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error)
|
||||
ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator
|
||||
Close() error
|
||||
|
||||
|
@ -66,19 +66,19 @@ type storageClient interface {
|
|||
|
||||
DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error
|
||||
ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error)
|
||||
UpdateDefaultObjectACL(ctx context.Context, opts ...storageOption) (*ACLRule, error)
|
||||
UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error
|
||||
|
||||
// Bucket ACL methods.
|
||||
|
||||
DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error
|
||||
ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error)
|
||||
UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error)
|
||||
UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error
|
||||
|
||||
// Object ACL methods.
|
||||
|
||||
DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error
|
||||
ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error)
|
||||
UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error)
|
||||
UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error
|
||||
|
||||
// Media operations.
|
||||
|
||||
|
@ -96,11 +96,11 @@ type storageClient interface {
|
|||
|
||||
// HMAC Key methods.
|
||||
|
||||
GetHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error)
|
||||
ListHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) *HMACKeysIterator
|
||||
UpdateHMACKey(ctx context.Context, desc *hmacKeyDesc, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error)
|
||||
CreateHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error)
|
||||
DeleteHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) error
|
||||
GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error)
|
||||
ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator
|
||||
UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error)
|
||||
CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error)
|
||||
DeleteHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) error
|
||||
|
||||
// Notification methods.
|
||||
ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (map[string]*Notification, error)
|
||||
|
@ -162,6 +162,20 @@ func callSettings(defaults *settings, opts ...storageOption) *settings {
|
|||
return &cs
|
||||
}
|
||||
|
||||
// makeStorageOpts is a helper for generating a set of storageOption based on
|
||||
// idempotency, retryConfig, and userProject. All top-level client operations
|
||||
// will generally have to pass these options through the interface.
|
||||
func makeStorageOpts(isIdempotent bool, retry *retryConfig, userProject string) []storageOption {
|
||||
opts := []storageOption{idempotent(isIdempotent)}
|
||||
if retry != nil {
|
||||
opts = append(opts, withRetryConfig(retry))
|
||||
}
|
||||
if userProject != "" {
|
||||
opts = append(opts, withUserProject(userProject))
|
||||
}
|
||||
return opts
|
||||
}
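makeStorageOpts is internal, but the retryConfig it forwards is the one users set through the public Retryer options. A sketch of configuring a handle so that every call built from it carries a custom retry policy (names hypothetical):

package example

import (
	"context"
	"time"

	"cloud.google.com/go/storage"
	"github.com/googleapis/gax-go/v2"
)

func bucketWithRetries(ctx context.Context, client *storage.Client) (*storage.BucketAttrs, error) {
	b := client.Bucket("my-bucket").Retryer(
		storage.WithPolicy(storage.RetryAlways),
		storage.WithBackoff(gax.Backoff{Initial: 2 * time.Second, Max: 30 * time.Second, Multiplier: 2}),
	)
	// Attrs is a read, so it is passed through makeStorageOpts as idempotent
	// and retried according to the policy above.
	return b.Attrs(ctx)
}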
|
||||
|
||||
// storageOption is the transport-agnostic call option for the storageClient
|
||||
// interface.
|
||||
type storageOption interface {
|
||||
|
@ -267,40 +281,53 @@ type openWriterParams struct {
|
|||
}
|
||||
|
||||
type newRangeReaderParams struct {
|
||||
bucket string
|
||||
conds *Conditions
|
||||
encryptionKey []byte
|
||||
gen int64
|
||||
length int64
|
||||
object string
|
||||
offset int64
|
||||
bucket string
|
||||
conds *Conditions
|
||||
encryptionKey []byte
|
||||
gen int64
|
||||
length int64
|
||||
object string
|
||||
offset int64
|
||||
readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently.
|
||||
}
|
||||
|
||||
type composeObjectRequest struct {
|
||||
dstBucket string
|
||||
dstObject string
|
||||
srcs []string
|
||||
dstObject destinationObject
|
||||
srcs []sourceObject
|
||||
predefinedACL string
|
||||
sendCRC32C bool
|
||||
}
|
||||
|
||||
type sourceObject struct {
|
||||
name string
|
||||
bucket string
|
||||
gen int64
|
||||
conds *Conditions
|
||||
predefinedACL string
|
||||
encryptionKey []byte
|
||||
}
|
||||
|
||||
type destinationObject struct {
|
||||
name string
|
||||
bucket string
|
||||
conds *Conditions
|
||||
attrs *ObjectAttrs // attrs to set on the destination object.
|
||||
encryptionKey []byte
|
||||
keyName string
|
||||
}
|
||||
|
||||
type rewriteObjectRequest struct {
|
||||
srcBucket string
|
||||
srcObject string
|
||||
dstBucket string
|
||||
dstObject string
|
||||
dstKeyName string
|
||||
attrs *ObjectAttrs
|
||||
gen int64
|
||||
conds *Conditions
|
||||
predefinedACL string
|
||||
token string
|
||||
srcObject sourceObject
|
||||
dstObject destinationObject
|
||||
predefinedACL string
|
||||
token string
|
||||
maxBytesRewrittenPerCall int64
|
||||
}
|
||||
|
||||
type rewriteObjectResponse struct {
|
||||
resource *ObjectAttrs
|
||||
done bool
|
||||
written int64
|
||||
size int64
|
||||
token string
|
||||
}
|
||||
|
|
153 vendor/cloud.google.com/go/storage/copy.go generated vendored
|
@ -20,7 +20,6 @@ import (
|
|||
"fmt"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// CopierFrom creates a Copier that can copy src to dst.
|
||||
|
@ -70,6 +69,15 @@ type Copier struct {
|
|||
DestinationKMSKeyName string
|
||||
|
||||
dst, src *ObjectHandle
|
||||
|
||||
// The maximum number of bytes that will be rewritten per rewrite request.
|
||||
// Most callers shouldn't need to specify this parameter - it is primarily
|
||||
// in place to support testing. If specified the value must be an integral
|
||||
// multiple of 1 MiB (1048576). Also, this only applies to requests where
|
||||
// the source and destination span locations and/or storage classes. Finally,
|
||||
// this value must not change across rewrite calls else you'll get an error
|
||||
// that the `rewriteToken` is invalid.
|
||||
maxBytesRewrittenPerCall int64
|
||||
}
|
||||
|
||||
// Run performs the copy.
|
||||
|
@ -86,69 +94,59 @@ func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
|
|||
if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil {
|
||||
return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key")
|
||||
}
|
||||
if c.dst.gen != defaultGen {
|
||||
return nil, fmt.Errorf("storage: generation cannot be specified on copy destination, got %v", c.dst.gen)
|
||||
}
|
||||
// Convert destination attributes to raw form, omitting the bucket.
|
||||
// If the bucket is included but name or content-type aren't, the service
|
||||
// returns a 400 with "Required" as the only message. Omitting the bucket
|
||||
// does not cause any problems.
|
||||
rawObject := c.ObjectAttrs.toRawObject("")
|
||||
req := &rewriteObjectRequest{
|
||||
srcObject: sourceObject{
|
||||
name: c.src.object,
|
||||
bucket: c.src.bucket,
|
||||
gen: c.src.gen,
|
||||
conds: c.src.conds,
|
||||
encryptionKey: c.src.encryptionKey,
|
||||
},
|
||||
dstObject: destinationObject{
|
||||
name: c.dst.object,
|
||||
bucket: c.dst.bucket,
|
||||
conds: c.dst.conds,
|
||||
attrs: &c.ObjectAttrs,
|
||||
encryptionKey: c.dst.encryptionKey,
|
||||
keyName: c.DestinationKMSKeyName,
|
||||
},
|
||||
predefinedACL: c.PredefinedACL,
|
||||
token: c.RewriteToken,
|
||||
maxBytesRewrittenPerCall: c.maxBytesRewrittenPerCall,
|
||||
}
|
||||
|
||||
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
|
||||
var userProject string
|
||||
if c.dst.userProject != "" {
|
||||
userProject = c.dst.userProject
|
||||
} else if c.src.userProject != "" {
|
||||
userProject = c.src.userProject
|
||||
}
|
||||
opts := makeStorageOpts(isIdempotent, c.dst.retry, userProject)
|
||||
|
||||
for {
|
||||
res, err := c.callRewrite(ctx, rawObject)
|
||||
res, err := c.dst.c.tc.RewriteObject(ctx, req, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.RewriteToken = res.token
|
||||
req.token = res.token
|
||||
if c.ProgressFunc != nil {
|
||||
c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize))
|
||||
c.ProgressFunc(uint64(res.written), uint64(res.size))
|
||||
}
|
||||
if res.Done { // Finished successfully.
|
||||
return newObject(res.Resource), nil
|
||||
if res.done { // Finished successfully.
|
||||
return res.resource, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) {
|
||||
call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj)
|
||||
|
||||
call.Context(ctx).Projection("full")
|
||||
if c.RewriteToken != "" {
|
||||
call.RewriteToken(c.RewriteToken)
|
||||
}
|
||||
if c.DestinationKMSKeyName != "" {
|
||||
call.DestinationKmsKeyName(c.DestinationKMSKeyName)
|
||||
}
|
||||
if c.PredefinedACL != "" {
|
||||
call.DestinationPredefinedAcl(c.PredefinedACL)
|
||||
}
|
||||
if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.dst.userProject != "" {
|
||||
call.UserProject(c.dst.userProject)
|
||||
} else if c.src.userProject != "" {
|
||||
call.UserProject(c.src.userProject)
|
||||
}
|
||||
if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var res *raw.RewriteResponse
|
||||
var err error
|
||||
setClientHeader(call.Header())
|
||||
|
||||
retryCall := func() error { res, err = call.Do(); return err }
|
||||
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
|
||||
|
||||
if err := run(ctx, retryCall, c.dst.retry, isIdempotent, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.RewriteToken = res.RewriteToken
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// ComposerFrom creates a Composer that can compose srcs into dst.
|
||||
// You can immediately call Run on the returned Composer, or you can
|
||||
// configure it first.
|
||||
|
@ -188,17 +186,13 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
|
|||
if err := c.dst.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.dst.gen != defaultGen {
|
||||
return nil, fmt.Errorf("storage: generation cannot be specified on compose destination, got %v", c.dst.gen)
|
||||
}
|
||||
if len(c.srcs) == 0 {
|
||||
return nil, errors.New("storage: at least one source object must be specified")
|
||||
}
|
||||
|
||||
req := &raw.ComposeRequest{}
|
||||
// Compose requires a non-empty Destination, so we always set it,
|
||||
// even if the caller-provided ObjectAttrs is the zero value.
|
||||
req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket)
|
||||
if c.SendCRC32C {
|
||||
req.Destination.Crc32c = encodeUint32(c.ObjectAttrs.CRC32C)
|
||||
}
|
||||
for _, src := range c.srcs {
|
||||
if err := src.validate(); err != nil {
|
||||
return nil, err
|
||||
|
@ -209,36 +203,31 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
|
|||
if src.encryptionKey != nil {
|
||||
return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
|
||||
}
|
||||
srcObj := &raw.ComposeRequestSourceObjects{
|
||||
Name: src.object,
|
||||
}
|
||||
if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.SourceObjects = append(req.SourceObjects, srcObj)
|
||||
}
|
||||
|
||||
call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx)
|
||||
if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
|
||||
return nil, err
|
||||
req := &composeObjectRequest{
|
||||
dstBucket: c.dst.bucket,
|
||||
predefinedACL: c.PredefinedACL,
|
||||
sendCRC32C: c.SendCRC32C,
|
||||
}
|
||||
if c.dst.userProject != "" {
|
||||
call.UserProject(c.dst.userProject)
|
||||
req.dstObject = destinationObject{
|
||||
name: c.dst.object,
|
||||
bucket: c.dst.bucket,
|
||||
conds: c.dst.conds,
|
||||
attrs: &c.ObjectAttrs,
|
||||
encryptionKey: c.dst.encryptionKey,
|
||||
}
|
||||
if c.PredefinedACL != "" {
|
||||
call.DestinationPredefinedAcl(c.PredefinedACL)
|
||||
for _, src := range c.srcs {
|
||||
s := sourceObject{
|
||||
name: src.object,
|
||||
bucket: src.bucket,
|
||||
gen: src.gen,
|
||||
conds: src.conds,
|
||||
}
|
||||
req.srcs = append(req.srcs, s)
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var obj *raw.Object
|
||||
setClientHeader(call.Header())
|
||||
|
||||
retryCall := func() error { obj, err = call.Do(); return err }
|
||||
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
|
||||
|
||||
if err := run(ctx, retryCall, c.dst.retry, isIdempotent, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newObject(obj), nil
|
||||
opts := makeStorageOpts(isIdempotent, c.dst.retry, c.dst.userProject)
|
||||
return c.dst.c.tc.ComposeObject(ctx, req, opts...)
|
||||
}
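A minimal usage sketch of the ComposerFrom API that this request construction serves; the bucket and object names are placeholders, and ctx/client are assumed to exist as in the package documentation:

bkt := client.Bucket("my-bucket")
dst := bkt.Object("combined")
composer := dst.ComposerFrom(bkt.Object("part-1"), bkt.Object("part-2"))
// Optional destination attributes come from the embedded ObjectAttrs.
composer.ContentType = "text/plain"
attrs, err := composer.Run(ctx)
if err != nil {
	// TODO: Handle error.
}
fmt.Println("composed size:", attrs.Size)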
|
||||
|
|
321  vendor/cloud.google.com/go/storage/doc.go  generated  vendored
|
@ -22,185 +22,183 @@ https://cloud.google.com/storage/docs.
|
|||
See https://pkg.go.dev/cloud.google.com/go for authentication, timeouts,
|
||||
connection pooling and similar aspects of this package.
|
||||
|
||||
# Creating a Client
|
||||
|
||||
Creating a Client
|
||||
To start working with this package, create a [Client]:
|
||||
|
||||
To start working with this package, create a client:
|
||||
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
The client will use your default application credentials. Clients should be
|
||||
reused instead of created as needed. The methods of Client are safe for
|
||||
reused instead of created as needed. The methods of [Client] are safe for
|
||||
concurrent use by multiple goroutines.
|
||||
|
||||
If you only wish to access public data, you can create
|
||||
an unauthenticated client with
|
||||
|
||||
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
|
||||
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
|
||||
|
||||
To use an emulator with this library, you can set the STORAGE_EMULATOR_HOST
|
||||
environment variable to the address at which your emulator is running. This will
|
||||
send requests to that address instead of to Cloud Storage. You can then create
|
||||
and use a client as usual:
|
||||
|
||||
// Set STORAGE_EMULATOR_HOST environment variable.
|
||||
err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Set STORAGE_EMULATOR_HOST environment variable.
|
||||
err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
// Create client as usual.
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Create client as usual.
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
// This request is now directed to http://localhost:9000/storage/v1/b
|
||||
// instead of https://storage.googleapis.com/storage/v1/b
|
||||
if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// This request is now directed to http://localhost:9000/storage/v1/b
|
||||
// instead of https://storage.googleapis.com/storage/v1/b
|
||||
if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
Please note that there is no official emulator for Cloud Storage.
|
||||
|
||||
Buckets
|
||||
# Buckets
|
||||
|
||||
A Google Cloud Storage bucket is a collection of objects. To work with a
|
||||
bucket, make a bucket handle:
|
||||
|
||||
bkt := client.Bucket(bucketName)
|
||||
bkt := client.Bucket(bucketName)
|
||||
|
||||
A handle is a reference to a bucket. You can have a handle even if the
|
||||
bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
|
||||
call Create on the handle:
|
||||
call [BucketHandle.Create]:
|
||||
|
||||
if err := bkt.Create(ctx, projectID, nil); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
if err := bkt.Create(ctx, projectID, nil); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
Note that although buckets are associated with projects, bucket names are
|
||||
global across all projects.
|
||||
|
||||
Each bucket has associated metadata, represented in this package by
|
||||
BucketAttrs. The third argument to BucketHandle.Create allows you to set
|
||||
the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
|
||||
Attrs:
|
||||
[BucketAttrs]. The third argument to [BucketHandle.Create] allows you to set
|
||||
the initial [BucketAttrs] of a bucket. To retrieve a bucket's attributes, use
|
||||
[BucketHandle.Attrs]:
|
||||
|
||||
attrs, err := bkt.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
|
||||
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
|
||||
attrs, err := bkt.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
|
||||
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
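The third argument to Create can likewise carry initial attributes. A hedged sketch in which the location and storage class values are examples only:

newAttrs := &storage.BucketAttrs{
	Location:     "US",
	StorageClass: "STANDARD",
}
if err := bkt.Create(ctx, projectID, newAttrs); err != nil {
	// TODO: Handle error.
}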
|
||||
|
||||
Objects
|
||||
# Objects
|
||||
|
||||
An object holds arbitrary data as a sequence of bytes, like a file. You
|
||||
refer to objects using a handle, just as with buckets, but unlike buckets
|
||||
you don't explicitly create an object. Instead, the first time you write
|
||||
to an object it will be created. You can use the standard Go io.Reader
|
||||
and io.Writer interfaces to read and write object data:
|
||||
to an object it will be created. You can use the standard Go [io.Reader]
|
||||
and [io.Writer] interfaces to read and write object data:
|
||||
|
||||
obj := bkt.Object("data")
|
||||
// Write something to obj.
|
||||
// w implements io.Writer.
|
||||
w := obj.NewWriter(ctx)
|
||||
// Write some text to obj. This will either create the object or overwrite whatever is there already.
|
||||
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Close, just like writing a file.
|
||||
if err := w.Close(); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
obj := bkt.Object("data")
|
||||
// Write something to obj.
|
||||
// w implements io.Writer.
|
||||
w := obj.NewWriter(ctx)
|
||||
// Write some text to obj. This will either create the object or overwrite whatever is there already.
|
||||
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Close, just like writing a file.
|
||||
if err := w.Close(); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
// Read it back.
|
||||
r, err := obj.NewReader(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer r.Close()
|
||||
if _, err := io.Copy(os.Stdout, r); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Prints "This object contains text."
|
||||
// Read it back.
|
||||
r, err := obj.NewReader(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer r.Close()
|
||||
if _, err := io.Copy(os.Stdout, r); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Prints "This object contains text."
|
||||
|
||||
Objects also have attributes, which you can fetch with Attrs:
|
||||
Objects also have attributes, which you can fetch with [ObjectHandle.Attrs]:
|
||||
|
||||
objAttrs, err := obj.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("object %s has size %d and can be read using %s\n",
|
||||
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
|
||||
objAttrs, err := obj.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("object %s has size %d and can be read using %s\n",
|
||||
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
|
||||
|
||||
Listing objects
|
||||
# Listing objects
|
||||
|
||||
Listing objects in a bucket is done with the Bucket.Objects method:
|
||||
Listing objects in a bucket is done with the [BucketHandle.Objects] method:
|
||||
|
||||
query := &storage.Query{Prefix: ""}
|
||||
query := &storage.Query{Prefix: ""}
|
||||
|
||||
var names []string
|
||||
it := bkt.Objects(ctx, query)
|
||||
for {
|
||||
attrs, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
names = append(names, attrs.Name)
|
||||
}
|
||||
var names []string
|
||||
it := bkt.Objects(ctx, query)
|
||||
for {
|
||||
attrs, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
names = append(names, attrs.Name)
|
||||
}
|
||||
|
||||
Objects are listed lexicographically by name. To filter objects
|
||||
lexicographically, Query.StartOffset and/or Query.EndOffset can be used:
|
||||
lexicographically, [Query.StartOffset] and/or [Query.EndOffset] can be used:
|
||||
|
||||
query := &storage.Query{
|
||||
Prefix: "",
|
||||
StartOffset: "bar/", // Only list objects lexicographically >= "bar/"
|
||||
EndOffset: "foo/", // Only list objects lexicographically < "foo/"
|
||||
}
|
||||
query := &storage.Query{
|
||||
Prefix: "",
|
||||
StartOffset: "bar/", // Only list objects lexicographically >= "bar/"
|
||||
EndOffset: "foo/", // Only list objects lexicographically < "foo/"
|
||||
}
|
||||
|
||||
// ... as before
|
||||
// ... as before
|
||||
|
||||
If only a subset of object attributes is needed when listing, specifying this
|
||||
subset using Query.SetAttrSelection may speed up the listing process:
|
||||
subset using [Query.SetAttrSelection] may speed up the listing process:
|
||||
|
||||
query := &storage.Query{Prefix: ""}
|
||||
query.SetAttrSelection([]string{"Name"})
|
||||
query := &storage.Query{Prefix: ""}
|
||||
query.SetAttrSelection([]string{"Name"})
|
||||
|
||||
// ... as before
|
||||
// ... as before
|
||||
|
||||
ACLs
|
||||
# ACLs
|
||||
|
||||
Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
|
||||
ACLRules, each of which specifies the role of a user, group or project. ACLs
|
||||
are suitable for fine-grained control, but you may prefer using IAM to control
|
||||
access at the project level (see
|
||||
https://cloud.google.com/storage/docs/access-control/iam).
|
||||
access at the project level (see [Cloud Storage IAM docs].
|
||||
|
||||
To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:
|
||||
To list the ACLs of a bucket or object, obtain an [ACLHandle] and call [ACLHandle.List]:
|
||||
|
||||
acls, err := obj.ACL().List(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
for _, rule := range acls {
|
||||
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
|
||||
}
|
||||
acls, err := obj.ACL().List(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
for _, rule := range acls {
|
||||
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
|
||||
}
|
||||
|
||||
You can also set and delete ACLs.
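For example, granting and then revoking public read access on an object could look like this (a sketch; adjust the entity and role as needed):

if err := obj.ACL().Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
	// TODO: Handle error.
}
if err := obj.ACL().Delete(ctx, storage.AllUsers); err != nil {
	// TODO: Handle error.
}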
|
||||
|
||||
Conditions
|
||||
# Conditions
|
||||
|
||||
Every object has a generation and a metageneration. The generation changes
|
||||
whenever the content changes, and the metageneration changes whenever the
|
||||
metadata changes. Conditions let you check these values before an operation;
|
||||
metadata changes. [Conditions] let you check these values before an operation;
|
||||
the operation only executes if the conditions match. You can use conditions to
|
||||
prevent race conditions in read-modify-write operations.
|
||||
|
||||
|
@ -208,60 +206,81 @@ For example, say you've read an object's metadata into objAttrs. Now
|
|||
you want to write to that object, but only if its contents haven't changed
|
||||
since you read it. Here is how to express that:
|
||||
|
||||
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
|
||||
// Proceed with writing as above.
|
||||
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
|
||||
// Proceed with writing as above.
|
||||
|
||||
Signed URLs
|
||||
# Signed URLs
|
||||
|
||||
You can obtain a URL that lets anyone read or write an object for a limited time.
|
||||
Signing a URL requires credentials authorized to sign a URL. To use the same
|
||||
authentication that was used when instantiating the Storage client, use the
|
||||
BucketHandle.SignedURL method.
|
||||
authentication that was used when instantiating the Storage client, use
|
||||
[BucketHandle.SignedURL].
|
||||
|
||||
url, err := client.Bucket(bucketName).SignedURL(objectName, opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(url)
|
||||
url, err := client.Bucket(bucketName).SignedURL(objectName, opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(url)
|
||||
|
||||
You can also sign a URL wihout creating a client. See the documentation of
|
||||
SignedURL for details.
|
||||
You can also sign a URL without creating a client. See the documentation of
|
||||
[SignedURL] for details.
|
||||
|
||||
url, err := storage.SignedURL(bucketName, "shared-object", opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(url)
|
||||
url, err := storage.SignedURL(bucketName, "shared-object", opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(url)
|
||||
|
||||
Post Policy V4 Signed Request
|
||||
# Post Policy V4 Signed Request
|
||||
|
||||
A type of signed request that allows uploads through HTML forms directly to Cloud Storage with
|
||||
temporary permission. Conditions can be applied to restrict how the HTML form is used and exercised
|
||||
by a user.
|
||||
|
||||
For more information, please see https://cloud.google.com/storage/docs/xml-api/post-object as well
|
||||
as the documentation of BucketHandle.GenerateSignedPostPolicyV4.
|
||||
For more information, please see the [XML POST Object docs] as well
|
||||
as the documentation of [BucketHandle.GenerateSignedPostPolicyV4].
|
||||
|
||||
pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields)
|
||||
pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields)
|
||||
|
||||
Errors
|
||||
# Credential requirements for signing
|
||||
|
||||
Errors returned by this client are often of the type googleapi.Error.
|
||||
These errors can be introspected for more information by using errors.As
|
||||
with the richer googleapi.Error type. For example:
|
||||
If the GoogleAccessID and PrivateKey option fields are not provided, they will
|
||||
be automatically detected by [BucketHandle.SignedURL] and
|
||||
[BucketHandle.GenerateSignedPostPolicyV4] if any of the following are true:
|
||||
- you are authenticated to the Storage Client with a service account's
|
||||
downloaded private key, either directly in code or by setting the
|
||||
GOOGLE_APPLICATION_CREDENTIALS environment variable (see [Other Environments]),
|
||||
- your application is running on Google Compute Engine (GCE), or
|
||||
- you are logged into [gcloud using application default credentials]
|
||||
with [impersonation enabled].
|
||||
|
||||
Detecting GoogleAccessID may not be possible if you are authenticated using a
|
||||
token source or using [option.WithHTTPClient]. In this case, you can provide a
|
||||
service account email for GoogleAccessID and the client will attempt to sign
|
||||
the URL or Post Policy using that service account.
|
||||
|
||||
To generate the signature, you must have:
|
||||
- iam.serviceAccounts.signBlob permissions on the GoogleAccessID service
|
||||
account, and
|
||||
- the [IAM Service Account Credentials API] enabled (unless authenticating
|
||||
with a downloaded private key).
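When automatic detection is not possible, the signing identity can be supplied explicitly. A hedged sketch; the service account email and key bytes are placeholders supplied by the caller:

opts := &storage.SignedURLOptions{
	Scheme:         storage.SigningSchemeV4,
	Method:         "GET",
	Expires:        time.Now().Add(15 * time.Minute),
	GoogleAccessID: "signer@my-project.iam.gserviceaccount.com",
	PrivateKey:     pemKeyBytes, // hypothetical key material loaded by the caller
}
url, err := client.Bucket(bucketName).SignedURL(objectName, opts)
if err != nil {
	// TODO: Handle error.
}
fmt.Println(url)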
|
||||
|
||||
# Errors
|
||||
|
||||
Errors returned by this client are often of the type [googleapi.Error].
|
||||
These errors can be introspected for more information by using [errors.As]
|
||||
with the richer [googleapi.Error] type. For example:
|
||||
|
||||
var e *googleapi.Error
|
||||
if ok := errors.As(err, &e); ok {
|
||||
if e.Code == 409 { ... }
|
||||
}
|
||||
|
||||
See https://pkg.go.dev/google.golang.org/api/googleapi#Error for more information.
|
||||
|
||||
Retrying failed requests
|
||||
# Retrying failed requests
|
||||
|
||||
Methods in this package may retry calls that fail with transient errors.
|
||||
Retrying continues indefinitely unless the controlling context is canceled, the
|
||||
|
@ -271,12 +290,12 @@ continuing, use context timeouts or cancellation.
|
|||
The retry strategy in this library follows best practices for Cloud Storage. By
|
||||
default, operations are retried only if they are idempotent, and exponential
|
||||
backoff with jitter is employed. In addition, errors are only retried if they
|
||||
are defined as transient by the service. See
|
||||
https://cloud.google.com/storage/docs/retry-strategy for more information.
|
||||
are defined as transient by the service. See the [Cloud Storage retry docs]
|
||||
for more information.
|
||||
|
||||
Users can configure non-default retry behavior for a single library call (using
|
||||
BucketHandle.Retryer and ObjectHandle.Retryer) or for all calls made by a
|
||||
client (using Client.SetRetry). For example:
|
||||
[BucketHandle.Retryer] and [ObjectHandle.Retryer]) or for all calls made by a
|
||||
client (using [Client.SetRetry]). For example:
|
||||
|
||||
o := client.Bucket(bucket).Object(object).Retryer(
|
||||
// Use WithBackoff to change the timing of the exponential backoff.
|
||||
|
@ -297,5 +316,13 @@ client (using Client.SetRetry). For example:
|
|||
if err := o.Delete(ctx); err != nil {
|
||||
// Handle err.
|
||||
}
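A client-wide variant using Client.SetRetry is sketched below; the backoff values are illustrative only, and gax refers to github.com/googleapis/gax-go/v2:

client.SetRetry(
	// Use a larger initial backoff and retry all operations,
	// including non-idempotent ones.
	storage.WithBackoff(gax.Backoff{
		Initial: 2 * time.Second,
		Max:     30 * time.Second,
	}),
	storage.WithPolicy(storage.RetryAlways),
)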
|
||||
|
||||
[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam
|
||||
[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object
|
||||
[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy
|
||||
[Other Environments]: https://cloud.google.com/storage/docs/authentication#libauth
|
||||
[gcloud using application default credentials]: https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login
|
||||
[impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account
|
||||
[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview
|
||||
*/
|
||||
package storage // import "cloud.google.com/go/storage"
|
||||
|
|
509  vendor/cloud.google.com/go/storage/grpc_client.go  generated  vendored
|
@ -16,16 +16,20 @@ package storage
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
gapic "cloud.google.com/go/storage/internal/apiv2"
|
||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
||||
"github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
iampb "google.golang.org/genproto/googleapis/iam/v1"
|
||||
storagepb "google.golang.org/genproto/googleapis/storage/v2"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
|
@ -43,10 +47,21 @@ const (
|
|||
// This is only used for the gRPC client.
|
||||
defaultConnPoolSize = 4
|
||||
|
||||
// maxPerMessageWriteSize is the maximum amount of content that can be sent
|
||||
// per WriteObjectRequest message. A buffer reaching this amount will
|
||||
// precipitate a flush of the buffer. It is only used by the gRPC Writer
|
||||
// implementation.
|
||||
maxPerMessageWriteSize int = int(storagepb.ServiceConstants_MAX_WRITE_CHUNK_BYTES)
|
||||
|
||||
// globalProjectAlias is the project ID alias used for global buckets.
|
||||
//
|
||||
// This is only used for the gRPC API.
|
||||
globalProjectAlias = "_"
|
||||
|
||||
// msgEntityNotSupported indicates ACL entities using project ID are not currently supported.
|
||||
//
|
||||
// This is only used for the gRPC API.
|
||||
msgEntityNotSupported = "The gRPC API currently does not support ACL entities using project ID, use project numbers instead"
|
||||
)
|
||||
|
||||
// defaultGRPCOptions returns a set of the default client options
|
||||
|
@ -75,6 +90,9 @@ func defaultGRPCOptions() []option.ClientOption {
|
|||
option.WithGRPCDialOption(grpc.WithInsecure()),
|
||||
option.WithoutAuthentication(),
|
||||
)
|
||||
} else {
|
||||
// Only enable DirectPath when the emulator is not being targeted.
|
||||
defaults = append(defaults, internaloption.EnableDirectPath(true))
|
||||
}
|
||||
|
||||
return defaults
|
||||
|
@ -127,10 +145,10 @@ func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project strin
|
|||
return resp.EmailAddress, err
|
||||
}
|
||||
|
||||
func (c *grpcStorageClient) CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
|
||||
func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
b := attrs.toProtoBucket()
|
||||
|
||||
b.Name = bucket
|
||||
// If there is lifecycle information but no location, explicitly set
|
||||
// the location. This is a GCS quirk/bug.
|
||||
if b.GetLocation() == "" && b.GetLifecycle() != nil {
|
||||
|
@ -138,11 +156,13 @@ func (c *grpcStorageClient) CreateBucket(ctx context.Context, project string, at
|
|||
}
|
||||
|
||||
req := &storagepb.CreateBucketRequest{
|
||||
Parent: toProjectResource(project),
|
||||
Bucket: b,
|
||||
BucketId: b.GetName(),
|
||||
PredefinedAcl: attrs.PredefinedACL,
|
||||
PredefinedDefaultObjectAcl: attrs.PredefinedDefaultObjectACL,
|
||||
Parent: toProjectResource(project),
|
||||
Bucket: b,
|
||||
BucketId: b.GetName(),
|
||||
}
|
||||
if attrs != nil {
|
||||
req.PredefinedAcl = attrs.PredefinedACL
|
||||
req.PredefinedDefaultObjectAcl = attrs.PredefinedDefaultObjectACL
|
||||
}
|
||||
|
||||
var battrs *BucketAttrs
|
||||
|
@ -227,7 +247,8 @@ func (c *grpcStorageClient) DeleteBucket(ctx context.Context, bucket string, con
|
|||
func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
req := &storagepb.GetBucketRequest{
|
||||
Name: bucketResourceName(globalProjectAlias, bucket),
|
||||
Name: bucketResourceName(globalProjectAlias, bucket),
|
||||
ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}},
|
||||
}
|
||||
if err := applyBucketCondsProto("grpcStorageClient.GetBucket", conds, req); err != nil {
|
||||
return nil, err
|
||||
|
@ -309,6 +330,8 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat
|
|||
// In cases where PredefinedDefaultObjectACL is set, DefaultObjectAcl is cleared.
|
||||
fieldMask.Paths = append(fieldMask.Paths, "default_object_acl")
|
||||
}
|
||||
// Note: This API currently does not support entities using project ID.
|
||||
// Use project numbers in ACL entities. Pending b/233617896.
|
||||
if uattrs.acl != nil {
|
||||
// In cases where acl is set by UpdateBucketACL method.
|
||||
fieldMask.Paths = append(fieldMask.Paths, "acl")
|
||||
|
@ -323,6 +346,9 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat
|
|||
if uattrs.RPO != RPOUnknown {
|
||||
fieldMask.Paths = append(fieldMask.Paths, "rpo")
|
||||
}
|
||||
if uattrs.Autoclass != nil {
|
||||
fieldMask.Paths = append(fieldMask.Paths, "autoclass")
|
||||
}
|
||||
// TODO(cathyo): Handle labels. Pending b/230510191.
|
||||
req.UpdateMask = fieldMask
|
||||
|
||||
|
@ -359,14 +385,14 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
|
|||
it.query = *q
|
||||
}
|
||||
req := &storagepb.ListObjectsRequest{
|
||||
Parent: bucketResourceName(globalProjectAlias, bucket),
|
||||
Prefix: it.query.Prefix,
|
||||
Delimiter: it.query.Delimiter,
|
||||
Versions: it.query.Versions,
|
||||
LexicographicStart: it.query.StartOffset,
|
||||
LexicographicEnd: it.query.EndOffset,
|
||||
// TODO(noahietz): Convert a projection to a FieldMask.
|
||||
// ReadMask: q.Projection,
|
||||
Parent: bucketResourceName(globalProjectAlias, bucket),
|
||||
Prefix: it.query.Prefix,
|
||||
Delimiter: it.query.Delimiter,
|
||||
Versions: it.query.Versions,
|
||||
LexicographicStart: it.query.StartOffset,
|
||||
LexicographicEnd: it.query.EndOffset,
|
||||
IncludeTrailingDelimiter: it.query.IncludeTrailingDelimiter,
|
||||
ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask
|
||||
}
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
|
@ -390,6 +416,12 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
|
|||
it.items = append(it.items, b)
|
||||
}
|
||||
|
||||
// Response is always non-nil after a successful request.
|
||||
res := gitr.Response.(*storagepb.ListObjectsResponse)
|
||||
for _, prefix := range res.GetPrefixes() {
|
||||
it.items = append(it.items, &ObjectAttrs{Prefix: prefix})
|
||||
}
|
||||
|
||||
return token, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
|
@ -428,6 +460,8 @@ func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string
|
|||
req := &storagepb.GetObjectRequest{
|
||||
Bucket: bucketResourceName(globalProjectAlias, bucket),
|
||||
Object: object,
|
||||
// ProjectionFull by default.
|
||||
ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}},
|
||||
}
|
||||
if err := applyCondsProto("grpcStorageClient.GetObject", gen, conds, req); err != nil {
|
||||
return nil, err
|
||||
|
@ -458,7 +492,8 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object str
|
|||
s := callSettings(c.settings, opts...)
|
||||
o := uattrs.toProtoObject(bucketResourceName(globalProjectAlias, bucket), object)
|
||||
req := &storagepb.UpdateObjectRequest{
|
||||
Object: o,
|
||||
Object: o,
|
||||
PredefinedAcl: uattrs.PredefinedACL,
|
||||
}
|
||||
if err := applyCondsProto("grpcStorageClient.UpdateObject", gen, conds, req); err != nil {
|
||||
return nil, err
|
||||
|
@ -470,10 +505,7 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object str
|
|||
req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey)
|
||||
}
|
||||
|
||||
var paths []string
|
||||
fieldMask := &fieldmaskpb.FieldMask{
|
||||
Paths: paths,
|
||||
}
|
||||
fieldMask := &fieldmaskpb.FieldMask{Paths: nil}
|
||||
if uattrs.EventBasedHold != nil {
|
||||
fieldMask.Paths = append(fieldMask.Paths, "event_based_hold")
|
||||
}
|
||||
|
@ -498,8 +530,11 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object str
|
|||
if !uattrs.CustomTime.IsZero() {
|
||||
fieldMask.Paths = append(fieldMask.Paths, "custom_time")
|
||||
}
|
||||
|
||||
// TODO(cathyo): Handle ACL and PredefinedACL. Pending b/233617896.
|
||||
// Note: This API currently does not support entities using project ID.
|
||||
// Use project numbers in ACL entities. Pending b/233617896.
|
||||
if uattrs.ACL != nil || len(uattrs.PredefinedACL) > 0 {
|
||||
fieldMask.Paths = append(fieldMask.Paths, "acl")
|
||||
}
|
||||
// TODO(cathyo): Handle metadata. Pending b/230510191.
|
||||
|
||||
req.UpdateMask = fieldMask
|
||||
|
@ -527,11 +562,21 @@ func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket s
|
|||
return err
|
||||
}
|
||||
// Delete the entity and copy other remaining ACL entities.
|
||||
// Note: This API currently does not support entities using project ID.
|
||||
// Use project numbers in ACL entities. Pending b/233617896.
|
||||
// Return error if entity is not found or a project ID is used.
|
||||
invalidEntity := true
|
||||
var acl []ACLRule
|
||||
for _, a := range attrs.DefaultObjectACL {
|
||||
if a.Entity != entity {
|
||||
acl = append(acl, a)
|
||||
}
|
||||
if a.Entity == entity {
|
||||
invalidEntity = false
|
||||
}
|
||||
}
|
||||
if invalidEntity {
|
||||
return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.DefaultObjectACL, msgEntityNotSupported)
|
||||
}
|
||||
uattrs := &BucketAttrsToUpdate{defaultObjectACL: acl}
|
||||
// Call UpdateBucket with a MetagenerationMatch precondition set.
|
||||
|
@ -540,6 +585,7 @@ func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket s
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *grpcStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) {
|
||||
attrs, err := c.GetBucket(ctx, bucket, nil, opts...)
|
||||
if err != nil {
|
||||
|
@ -547,8 +593,25 @@ func (c *grpcStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket st
|
|||
}
|
||||
return attrs.DefaultObjectACL, nil
|
||||
}
|
||||
func (c *grpcStorageClient) UpdateDefaultObjectACL(ctx context.Context, opts ...storageOption) (*ACLRule, error) {
|
||||
return nil, errMethodNotSupported
|
||||
|
||||
func (c *grpcStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error {
|
||||
// There is no separate API for PATCH in gRPC.
|
||||
// Make a GET call first to retrieve BucketAttrs.
|
||||
attrs, err := c.GetBucket(ctx, bucket, nil, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Note: This API currently does not support entities using project ID.
|
||||
// Use project numbers in ACL entities. Pending b/233617896.
|
||||
var acl []ACLRule
|
||||
aclRule := ACLRule{Entity: entity, Role: role}
|
||||
acl = append(attrs.DefaultObjectACL, aclRule)
|
||||
uattrs := &BucketAttrsToUpdate{defaultObjectACL: acl}
|
||||
// Call UpdateBucket with a MetagenerationMatch precondition set.
|
||||
if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Bucket ACL methods.
|
||||
|
@ -561,11 +624,21 @@ func (c *grpcStorageClient) DeleteBucketACL(ctx context.Context, bucket string,
|
|||
return err
|
||||
}
|
||||
// Delete the entity and copy other remaining ACL entities.
|
||||
// Note: This API currently does not support entities using project ID.
|
||||
// Use project numbers in ACL entities. Pending b/233617896.
|
||||
// Return error if entity is not found or a project ID is used.
|
||||
invalidEntity := true
|
||||
var acl []ACLRule
|
||||
for _, a := range attrs.ACL {
|
||||
if a.Entity != entity {
|
||||
acl = append(acl, a)
|
||||
}
|
||||
if a.Entity == entity {
|
||||
invalidEntity = false
|
||||
}
|
||||
}
|
||||
if invalidEntity {
|
||||
return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.ACL, msgEntityNotSupported)
|
||||
}
|
||||
uattrs := &BucketAttrsToUpdate{acl: acl}
|
||||
// Call UpdateBucket with a MetagenerationMatch precondition set.
|
||||
|
@ -574,6 +647,7 @@ func (c *grpcStorageClient) DeleteBucketACL(ctx context.Context, bucket string,
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *grpcStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) {
|
||||
attrs, err := c.GetBucket(ctx, bucket, nil, opts...)
|
||||
if err != nil {
|
||||
|
@ -582,29 +656,58 @@ func (c *grpcStorageClient) ListBucketACLs(ctx context.Context, bucket string, o
|
|||
return attrs.ACL, nil
|
||||
}
|
||||
|
||||
func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) {
|
||||
func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error {
|
||||
// There is no separate API for PATCH in gRPC.
|
||||
// Make a GET call first to retrieve BucketAttrs.
|
||||
attrs, err := c.GetBucket(ctx, bucket, nil, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
// Note: This API currently does not support entities using project ID.
|
||||
// Use project numbers in ACL entities. Pending b/233617896.
|
||||
var acl []ACLRule
|
||||
aclRule := ACLRule{Entity: entity, Role: role}
|
||||
acl = append(attrs.ACL, aclRule)
|
||||
uattrs := &BucketAttrsToUpdate{acl: acl}
|
||||
// Call UpdateBucket with a MetagenerationMatch precondition set.
|
||||
_, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil {
|
||||
return err
|
||||
}
|
||||
return &aclRule, err
|
||||
return nil
|
||||
}
|
||||
|
||||
// Object ACL methods.
|
||||
|
||||
func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error {
|
||||
return errMethodNotSupported
|
||||
// There is no separate API for PATCH in gRPC.
|
||||
// Make a GET call first to retrieve ObjectAttrs.
|
||||
attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Delete the entity and copy other remaining ACL entities.
|
||||
// Note: This API currently does not support entities using project ID.
|
||||
// Use project numbers in ACL entities. Pending b/233617896.
|
||||
// Return error if entity is not found or a project ID is used.
|
||||
invalidEntity := true
|
||||
var acl []ACLRule
|
||||
for _, a := range attrs.ACL {
|
||||
if a.Entity != entity {
|
||||
acl = append(acl, a)
|
||||
}
|
||||
if a.Entity == entity {
|
||||
invalidEntity = false
|
||||
}
|
||||
}
|
||||
if invalidEntity {
|
||||
return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.ACL, msgEntityNotSupported)
|
||||
}
|
||||
uattrs := &ObjectAttrsToUpdate{ACL: acl}
|
||||
// Call UpdateObject with the specified metageneration.
|
||||
if _, err = c.UpdateObject(ctx, bucket, object, uattrs, defaultGen, nil, &Conditions{MetagenerationMatch: attrs.Metageneration}, opts...); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object.
|
||||
|
@ -617,31 +720,141 @@ func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object s
|
|||
return o.ACL, nil
|
||||
}
|
||||
|
||||
func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) {
|
||||
return nil, errMethodNotSupported
|
||||
func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error {
|
||||
// There is no separate API for PATCH in gRPC.
|
||||
// Make a GET call first to retrieve ObjectAttrs.
|
||||
attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Note: This API currently does not support entities using project ID.
|
||||
// Use project numbers in ACL entities. Pending b/233617896.
|
||||
var acl []ACLRule
|
||||
aclRule := ACLRule{Entity: entity, Role: role}
|
||||
acl = append(attrs.ACL, aclRule)
|
||||
uattrs := &ObjectAttrsToUpdate{ACL: acl}
|
||||
// Call UpdateObject with the specified metageneration.
|
||||
if _, err = c.UpdateObject(ctx, bucket, object, uattrs, defaultGen, nil, &Conditions{MetagenerationMatch: attrs.Metageneration}, opts...); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Media operations.
|
||||
|
||||
func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) {
|
||||
return nil, errMethodNotSupported
|
||||
s := callSettings(c.settings, opts...)
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
}
|
||||
|
||||
dstObjPb := req.dstObject.attrs.toProtoObject(req.dstBucket)
|
||||
dstObjPb.Name = req.dstObject.name
|
||||
if err := applyCondsProto("ComposeObject destination", defaultGen, req.dstObject.conds, dstObjPb); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if req.sendCRC32C {
|
||||
dstObjPb.Checksums.Crc32C = &req.dstObject.attrs.CRC32C
|
||||
}
|
||||
|
||||
srcs := []*storagepb.ComposeObjectRequest_SourceObject{}
|
||||
for _, src := range req.srcs {
|
||||
srcObjPb := &storagepb.ComposeObjectRequest_SourceObject{Name: src.name}
|
||||
if err := applyCondsProto("ComposeObject source", src.gen, src.conds, srcObjPb); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
srcs = append(srcs, srcObjPb)
|
||||
}
|
||||
|
||||
rawReq := &storagepb.ComposeObjectRequest{
|
||||
Destination: dstObjPb,
|
||||
SourceObjects: srcs,
|
||||
}
|
||||
if req.predefinedACL != "" {
|
||||
rawReq.DestinationPredefinedAcl = req.predefinedACL
|
||||
}
|
||||
if req.dstObject.encryptionKey != nil {
|
||||
rawReq.CommonObjectRequestParams = toProtoCommonObjectRequestParams(req.dstObject.encryptionKey)
|
||||
}
|
||||
|
||||
var obj *storagepb.Object
|
||||
var err error
|
||||
if err := run(ctx, func() error {
|
||||
obj, err = c.raw.ComposeObject(ctx, rawReq, s.gax...)
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newObjectFromProto(obj), nil
|
||||
}
|
||||
func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) {
|
||||
return nil, errMethodNotSupported
|
||||
s := callSettings(c.settings, opts...)
|
||||
obj := req.dstObject.attrs.toProtoObject("")
|
||||
call := &storagepb.RewriteObjectRequest{
|
||||
SourceBucket: bucketResourceName(globalProjectAlias, req.srcObject.bucket),
|
||||
SourceObject: req.srcObject.name,
|
||||
RewriteToken: req.token,
|
||||
DestinationBucket: bucketResourceName(globalProjectAlias, req.dstObject.bucket),
|
||||
DestinationName: req.dstObject.name,
|
||||
Destination: obj,
|
||||
DestinationKmsKey: req.dstObject.keyName,
|
||||
DestinationPredefinedAcl: req.predefinedACL,
|
||||
}
|
||||
|
||||
// The userProject, whether source or destination project, is decided by the code calling the interface.
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
}
|
||||
if err := applyCondsProto("Copy destination", defaultGen, req.dstObject.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := applySourceCondsProto(req.srcObject.gen, req.srcObject.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(req.dstObject.encryptionKey) > 0 {
|
||||
call.CommonObjectRequestParams = toProtoCommonObjectRequestParams(req.dstObject.encryptionKey)
|
||||
}
|
||||
if len(req.srcObject.encryptionKey) > 0 {
|
||||
srcParams := toProtoCommonObjectRequestParams(req.srcObject.encryptionKey)
|
||||
call.CopySourceEncryptionAlgorithm = srcParams.GetEncryptionAlgorithm()
|
||||
call.CopySourceEncryptionKeyBytes = srcParams.GetEncryptionKeyBytes()
|
||||
call.CopySourceEncryptionKeySha256Bytes = srcParams.GetEncryptionKeySha256Bytes()
|
||||
}
|
||||
|
||||
call.MaxBytesRewrittenPerCall = req.maxBytesRewrittenPerCall
|
||||
|
||||
var res *storagepb.RewriteResponse
|
||||
var err error
|
||||
|
||||
retryCall := func() error { res, err = c.raw.RewriteObject(ctx, call, s.gax...); return err }
|
||||
|
||||
if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r := &rewriteObjectResponse{
|
||||
done: res.GetDone(),
|
||||
written: res.GetTotalBytesRewritten(),
|
||||
size: res.GetObjectSize(),
|
||||
token: res.GetRewriteToken(),
|
||||
resource: newObjectFromProto(res.GetResource()),
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if params.conds != nil {
|
||||
if err := params.conds.validate("grpcStorageClient.NewRangeReader"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
s := callSettings(c.settings, opts...)
|
||||
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
}
|
||||
|
||||
// A negative length means "read to the end of the object", but the
|
||||
// read_limit field it corresponds to uses zero to mean the same thing. Thus
|
||||
// we coerce the length to 0 to read to the end of the object.
|
||||
|
@ -695,6 +908,11 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
|
|||
}
|
||||
|
||||
msg, err = stream.Recv()
|
||||
// These types of errors show up on the Recv call, rather than the
|
||||
// initialization of the stream via ReadObject above.
|
||||
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
|
||||
return ErrObjectNotExist
|
||||
}
|
||||
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
|
||||
|
@ -738,6 +956,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
|
|||
// Store the content from the first Recv in the
|
||||
// client buffer for reading later.
|
||||
leftovers: msg.GetChecksummedData().GetContent(),
|
||||
settings: s,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -759,6 +978,8 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
|
|||
}
|
||||
|
||||
func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
|
||||
var offset int64
|
||||
errorf := params.setError
|
||||
progress := params.progress
|
||||
|
@ -766,6 +987,10 @@ func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storage
|
|||
|
||||
pr, pw := io.Pipe()
|
||||
gw := newGRPCWriter(c, params, pr)
|
||||
gw.settings = s
|
||||
if s.userProject != "" {
|
||||
gw.ctx = setUserProjectMetadata(gw.ctx, s.userProject)
|
||||
}
|
||||
|
||||
// This function reads the data sent to the pipe and sends sets of messages
|
||||
// on the gRPC client-stream as the buffer is filled.
|
||||
|
@ -883,20 +1108,142 @@ func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource str
|
|||
|
||||
// HMAC Key methods.
|
||||
|
||||
func (c *grpcStorageClient) GetHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) {
|
||||
return nil, errMethodNotSupported
|
||||
func (c *grpcStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
req := &storagepb.GetHmacKeyRequest{
|
||||
AccessId: accessID,
|
||||
Project: toProjectResource(project),
|
||||
}
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
}
|
||||
var metadata *storagepb.HmacKeyMetadata
|
||||
err := run(ctx, func() error {
|
||||
var err error
|
||||
metadata, err = c.raw.GetHmacKey(ctx, req, s.gax...)
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toHMACKeyFromProto(metadata), nil
|
||||
}
|
||||
func (c *grpcStorageClient) ListHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) *HMACKeysIterator {
|
||||
return &HMACKeysIterator{}
|
||||
|
||||
func (c *grpcStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator {
|
||||
s := callSettings(c.settings, opts...)
|
||||
req := &storagepb.ListHmacKeysRequest{
|
||||
Project: toProjectResource(project),
|
||||
ServiceAccountEmail: serviceAccountEmail,
|
||||
ShowDeletedKeys: showDeletedKeys,
|
||||
}
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
}
|
||||
it := &HMACKeysIterator{
|
||||
ctx: ctx,
|
||||
projectID: project,
|
||||
retry: s.retry,
|
||||
}
|
||||
gitr := c.raw.ListHmacKeys(it.ctx, req, s.gax...)
|
||||
fetch := func(pageSize int, pageToken string) (token string, err error) {
|
||||
var hmacKeys []*storagepb.HmacKeyMetadata
|
||||
err = run(it.ctx, func() error {
|
||||
hmacKeys, token, err = gitr.InternalFetch(pageSize, pageToken)
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, hkmd := range hmacKeys {
|
||||
hk := toHMACKeyFromProto(hkmd)
|
||||
it.hmacKeys = append(it.hmacKeys, hk)
|
||||
}
|
||||
|
||||
return token, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
fetch,
|
||||
func() int { return len(it.hmacKeys) - it.index },
|
||||
func() interface{} {
|
||||
prev := it.hmacKeys
|
||||
it.hmacKeys = it.hmacKeys[:0]
|
||||
it.index = 0
|
||||
return prev
|
||||
})
|
||||
return it
|
||||
}
|
||||
func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, desc *hmacKeyDesc, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) {
|
||||
return nil, errMethodNotSupported
|
||||
|
||||
func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
hk := &storagepb.HmacKeyMetadata{
|
||||
AccessId: accessID,
|
||||
Project: toProjectResource(project),
|
||||
ServiceAccountEmail: serviceAccountEmail,
|
||||
State: string(attrs.State),
|
||||
Etag: attrs.Etag,
|
||||
}
|
||||
var paths []string
|
||||
fieldMask := &fieldmaskpb.FieldMask{
|
||||
Paths: paths,
|
||||
}
|
||||
if attrs.State != "" {
|
||||
fieldMask.Paths = append(fieldMask.Paths, "state")
|
||||
}
|
||||
req := &storagepb.UpdateHmacKeyRequest{
|
||||
HmacKey: hk,
|
||||
UpdateMask: fieldMask,
|
||||
}
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
}
|
||||
var metadata *storagepb.HmacKeyMetadata
|
||||
err := run(ctx, func() error {
|
||||
var err error
|
||||
metadata, err = c.raw.UpdateHmacKey(ctx, req, s.gax...)
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toHMACKeyFromProto(metadata), nil
|
||||
}
|
||||
func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) {
|
||||
return nil, errMethodNotSupported
|
||||
|
||||
func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
req := &storagepb.CreateHmacKeyRequest{
|
||||
Project: toProjectResource(project),
|
||||
ServiceAccountEmail: serviceAccountEmail,
|
||||
}
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
}
|
||||
var res *storagepb.CreateHmacKeyResponse
|
||||
err := run(ctx, func() error {
|
||||
var err error
|
||||
res, err = c.raw.CreateHmacKey(ctx, req, s.gax...)
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key := toHMACKeyFromProto(res.Metadata)
|
||||
key.Secret = base64.StdEncoding.EncodeToString(res.SecretKeyBytes)
|
||||
|
||||
return key, nil
|
||||
}
|
||||
func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) error {
|
||||
return errMethodNotSupported
|
||||
|
||||
func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error {
|
||||
s := callSettings(c.settings, opts...)
|
||||
req := &storagepb.DeleteHmacKeyRequest{
|
||||
AccessId: accessID,
|
||||
Project: toProjectResource(project),
|
||||
}
|
||||
if s.userProject != "" {
|
||||
ctx = setUserProjectMetadata(ctx, s.userProject)
|
||||
}
|
||||
return run(ctx, func() error {
|
||||
return c.raw.DeleteHmacKey(ctx, req, s.gax...)
|
||||
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
|
||||
}
|
||||
|
||||
// Notification methods.
|
||||
|
@ -988,6 +1335,7 @@ type gRPCReader struct {
|
|||
reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error)
|
||||
leftovers []byte
|
||||
cancel context.CancelFunc
|
||||
settings *settings
|
||||
}
|
||||
|
||||
// Read reads bytes into the user's buffer from an open gRPC stream.
|
||||
|
@ -1063,6 +1411,10 @@ func (r *gRPCReader) Close() error {
|
|||
// an attempt to reopen the stream.
|
||||
func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) {
|
||||
msg, err := r.stream.Recv()
|
||||
var shouldRetry = ShouldRetry
|
||||
if r.settings.retry != nil && r.settings.retry.shouldRetry != nil {
|
||||
shouldRetry = r.settings.retry.shouldRetry
|
||||
}
|
||||
if err != nil && shouldRetry(err) {
|
||||
// This will "close" the existing stream and immediately attempt to
|
||||
// reopen the stream, but will backoff if further attempts are necessary.
|
||||
|
@ -1127,6 +1479,7 @@ type gRPCWriter struct {
|
|||
attrs *ObjectAttrs
|
||||
conds *Conditions
|
||||
encryptionKey []byte
|
||||
settings *settings
|
||||
|
||||
sendCRC32C bool
|
||||
|
||||
|
@ -1144,21 +1497,27 @@ func (w *gRPCWriter) startResumableUpload() error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
upres, err := w.c.raw.StartResumableWrite(w.ctx, &storagepb.StartResumableWriteRequest{
|
||||
WriteObjectSpec: spec,
|
||||
})
|
||||
|
||||
w.upid = upres.GetUploadId()
|
||||
return err
|
||||
return run(w.ctx, func() error {
|
||||
upres, err := w.c.raw.StartResumableWrite(w.ctx, &storagepb.StartResumableWriteRequest{
|
||||
WriteObjectSpec: spec,
|
||||
})
|
||||
w.upid = upres.GetUploadId()
|
||||
return err
|
||||
}, w.settings.retry, w.settings.idempotent, setRetryHeaderGRPC(w.ctx))
|
||||
}
|
||||
|
||||
// queryProgress is a helper that queries the status of the resumable upload
|
||||
// associated with the given upload ID.
|
||||
func (w *gRPCWriter) queryProgress() (int64, error) {
|
||||
q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{UploadId: w.upid})
|
||||
var persistedSize int64
|
||||
err := run(w.ctx, func() error {
|
||||
q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{UploadId: w.upid})
|
||||
persistedSize = q.GetPersistedSize()
|
||||
return err
|
||||
}, w.settings.retry, true, setRetryHeaderGRPC(w.ctx))
|
||||
|
||||
// q.GetCommittedSize() will return 0 if q is nil.
|
||||
return q.GetPersistedSize(), err
|
||||
return persistedSize, err
|
||||
}
|
||||
|
||||
// uploadBuffer opens a Write stream and uploads the buffer at the given offset (if
|
||||
|
@ -1173,6 +1532,10 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
|
|||
var err error
|
||||
var finishWrite bool
|
||||
var sent, limit int = 0, maxPerMessageWriteSize
|
||||
var shouldRetry = ShouldRetry
|
||||
if w.settings.retry != nil && w.settings.retry.shouldRetry != nil {
|
||||
shouldRetry = w.settings.retry.shouldRetry
|
||||
}
|
||||
offset := start
|
||||
toWrite := w.buf[:recvd]
|
||||
for {
|
||||
|
@ -1203,7 +1566,8 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
|
|||
// The first message on the WriteObject stream must either be the
|
||||
// Object or the Resumable Upload ID.
|
||||
if first {
|
||||
w.stream, err = w.c.raw.WriteObject(w.ctx)
|
||||
ctx := gapic.InsertMetadata(w.ctx, metadata.Pairs("x-goog-request-params", "bucket="+url.QueryEscape(w.bucket)))
|
||||
w.stream, err = w.c.raw.WriteObject(ctx)
|
||||
if err != nil {
|
||||
return nil, 0, false, err
|
||||
}
|
||||
|
@ -1225,8 +1589,16 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
|
|||
// on the *last* message of the stream (instead of the first).
|
||||
if w.sendCRC32C {
|
||||
req.ObjectChecksums = &storagepb.ObjectChecksums{
|
||||
Crc32C: proto.Uint32(w.attrs.CRC32C),
|
||||
Md5Hash: w.attrs.MD5,
|
||||
Crc32C: proto.Uint32(w.attrs.CRC32C),
|
||||
}
|
||||
}
|
||||
if len(w.attrs.MD5) != 0 {
|
||||
if cs := req.GetObjectChecksums(); cs == nil {
|
||||
req.ObjectChecksums = &storagepb.ObjectChecksums{
|
||||
Md5Hash: w.attrs.MD5,
|
||||
}
|
||||
} else {
|
||||
cs.Md5Hash = w.attrs.MD5
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1336,8 +1708,8 @@ func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) {
|
|||
spec := &storagepb.WriteObjectSpec{
|
||||
Resource: attrs.toProtoObject(w.bucket),
|
||||
}
|
||||
// WriteObject doesn't support the generation condition, so use -1.
|
||||
if err := applyCondsProto("WriteObject", -1, w.conds, spec); err != nil {
|
||||
// WriteObject doesn't support the generation condition, so use default.
|
||||
if err := applyCondsProto("WriteObject", defaultGen, w.conds, spec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return spec, nil
|
||||
|
@ -1345,7 +1717,12 @@ func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) {
|
|||
|
||||
// read copies the data in the reader to the given buffer and reports how much
|
||||
// data was read into the buffer and if there is no more data to read (EOF).
|
||||
// Furthermore, if the attrs.ContentType is unset, the first bytes of content
|
||||
// will be sniffed for a matching content type.
|
||||
func (w *gRPCWriter) read() (int, bool, error) {
|
||||
if w.attrs.ContentType == "" {
|
||||
w.reader, w.attrs.ContentType = gax.DetermineContentType(w.reader)
|
||||
}
|
||||
// Set n to -1 to start the Read loop.
|
||||
var n, recvd int = -1, 0
|
||||
var err error
|
||||
|
161 vendor/cloud.google.com/go/storage/hmac.go generated vendored
@ -20,6 +20,7 @@ import (
|
|||
"fmt"
|
||||
"time"
|
||||
|
||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
||||
"google.golang.org/api/iterator"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
@ -90,7 +91,7 @@ type HMACKeyHandle struct {
|
|||
projectID string
|
||||
accessID string
|
||||
retry *retryConfig
|
||||
raw *raw.ProjectsHmacKeysService
|
||||
tc storageClient
|
||||
}
|
||||
|
||||
// HMACKeyHandle creates a handle that will be used for HMACKey operations.
|
||||
|
@ -101,7 +102,7 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
|
|||
projectID: projectID,
|
||||
accessID: accessID,
|
||||
retry: c.retry,
|
||||
raw: raw.NewProjectsHmacKeysService(c.raw),
|
||||
tc: c.tc,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -113,32 +114,15 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
|
|||
//
|
||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) {
|
||||
call := hkh.raw.Get(hkh.projectID, hkh.accessID)
|
||||
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
if desc.userProjectID != "" {
|
||||
call = call.UserProject(desc.userProjectID)
|
||||
}
|
||||
|
||||
setClientHeader(call.Header())
|
||||
o := makeStorageOpts(true, hkh.retry, desc.userProjectID)
|
||||
hk, err := hkh.tc.GetHMACKey(ctx, hkh.projectID, hkh.accessID, o...)
|
||||
|
||||
var metadata *raw.HmacKeyMetadata
|
||||
var err error
|
||||
err = run(ctx, func() error {
|
||||
metadata, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, hkh.retry, true, setRetryHeaderHTTP(call))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hkPb := &raw.HmacKey{
|
||||
Metadata: metadata,
|
||||
}
|
||||
return pbHmacKeyToHMACKey(hkPb, false)
|
||||
return hk, err
|
||||
}
|
||||
|
||||
// Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage.
|
||||
|
@ -147,49 +131,59 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC
|
|||
//
|
||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error {
|
||||
delCall := hkh.raw.Delete(hkh.projectID, hkh.accessID)
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
if desc.userProjectID != "" {
|
||||
delCall = delCall.UserProject(desc.userProjectID)
|
||||
}
|
||||
setClientHeader(delCall.Header())
|
||||
|
||||
return run(ctx, func() error {
|
||||
return delCall.Context(ctx).Do()
|
||||
}, hkh.retry, true, setRetryHeaderHTTP(delCall))
|
||||
o := makeStorageOpts(true, hkh.retry, desc.userProjectID)
|
||||
return hkh.tc.DeleteHMACKey(ctx, hkh.projectID, hkh.accessID, o...)
|
||||
}
|
||||
|
||||
func pbHmacKeyToHMACKey(pb *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) {
|
||||
pbmd := pb.Metadata
|
||||
if pbmd == nil {
|
||||
func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) {
|
||||
hkmd := hk.Metadata
|
||||
if hkmd == nil {
|
||||
return nil, errors.New("field Metadata cannot be nil")
|
||||
}
|
||||
createdTime, err := time.Parse(time.RFC3339, pbmd.TimeCreated)
|
||||
createdTime, err := time.Parse(time.RFC3339, hkmd.TimeCreated)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("field CreatedTime: %v", err)
|
||||
return nil, fmt.Errorf("field CreatedTime: %w", err)
|
||||
}
|
||||
updatedTime, err := time.Parse(time.RFC3339, pbmd.Updated)
|
||||
updatedTime, err := time.Parse(time.RFC3339, hkmd.Updated)
|
||||
if err != nil && !updatedTimeCanBeNil {
|
||||
return nil, fmt.Errorf("field UpdatedTime: %v", err)
|
||||
return nil, fmt.Errorf("field UpdatedTime: %w", err)
|
||||
}
|
||||
|
||||
hmk := &HMACKey{
|
||||
AccessID: pbmd.AccessId,
|
||||
Secret: pb.Secret,
|
||||
Etag: pbmd.Etag,
|
||||
ID: pbmd.Id,
|
||||
State: HMACState(pbmd.State),
|
||||
ProjectID: pbmd.ProjectId,
|
||||
hmKey := &HMACKey{
|
||||
AccessID: hkmd.AccessId,
|
||||
Secret: hk.Secret,
|
||||
Etag: hkmd.Etag,
|
||||
ID: hkmd.Id,
|
||||
State: HMACState(hkmd.State),
|
||||
ProjectID: hkmd.ProjectId,
|
||||
CreatedTime: createdTime,
|
||||
UpdatedTime: updatedTime,
|
||||
|
||||
ServiceAccountEmail: pbmd.ServiceAccountEmail,
|
||||
ServiceAccountEmail: hkmd.ServiceAccountEmail,
|
||||
}
|
||||
|
||||
return hmk, nil
|
||||
return hmKey, nil
|
||||
}
|
||||
|
||||
func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey {
|
||||
if pbmd == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &HMACKey{
|
||||
AccessID: pbmd.GetAccessId(),
|
||||
ID: pbmd.GetId(),
|
||||
State: HMACState(pbmd.GetState()),
|
||||
ProjectID: pbmd.GetProject(),
|
||||
CreatedTime: convertProtoTime(pbmd.GetCreateTime()),
|
||||
UpdatedTime: convertProtoTime(pbmd.GetUpdateTime()),
|
||||
ServiceAccountEmail: pbmd.GetServiceAccountEmail(),
|
||||
}
|
||||
}
|
||||
|
||||
// CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey.
|
||||
|
@ -203,29 +197,14 @@ func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEma
|
|||
return nil, errors.New("storage: expecting a non-blank service account email")
|
||||
}
|
||||
|
||||
svc := raw.NewProjectsHmacKeysService(c.raw)
|
||||
call := svc.Create(projectID, serviceAccountEmail)
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
if desc.userProjectID != "" {
|
||||
call = call.UserProject(desc.userProjectID)
|
||||
}
|
||||
|
||||
setClientHeader(call.Header())
|
||||
|
||||
var hkPb *raw.HmacKey
|
||||
|
||||
if err := run(ctx, func() error {
|
||||
h, err := call.Context(ctx).Do()
|
||||
hkPb = h
|
||||
return err
|
||||
}, c.retry, false, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return pbHmacKeyToHMACKey(hkPb, true)
|
||||
o := makeStorageOpts(false, c.retry, desc.userProjectID)
|
||||
hk, err := c.tc.CreateHMACKey(ctx, projectID, serviceAccountEmail, o...)
|
||||
return hk, err
|
||||
}
|
||||
|
||||
// HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated.
|
||||
|
@ -247,35 +226,15 @@ func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opt
|
|||
return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive)
|
||||
}
|
||||
|
||||
call := h.raw.Update(h.projectID, h.accessID, &raw.HmacKeyMetadata{
|
||||
Etag: au.Etag,
|
||||
State: string(au.State),
|
||||
})
|
||||
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
if desc.userProjectID != "" {
|
||||
call = call.UserProject(desc.userProjectID)
|
||||
}
|
||||
setClientHeader(call.Header())
|
||||
|
||||
var metadata *raw.HmacKeyMetadata
|
||||
var err error
|
||||
isIdempotent := len(au.Etag) > 0
|
||||
err = run(ctx, func() error {
|
||||
metadata, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, h.retry, isIdempotent, setRetryHeaderHTTP(call))
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hkPb := &raw.HmacKey{
|
||||
Metadata: metadata,
|
||||
}
|
||||
return pbHmacKeyToHMACKey(hkPb, false)
|
||||
o := makeStorageOpts(isIdempotent, h.retry, desc.userProjectID)
|
||||
hk, err := h.tc.UpdateHMACKey(ctx, h.projectID, desc.forServiceAccountEmail, h.accessID, &au, o...)
|
||||
return hk, err
|
||||
}
|
||||
|
||||
// An HMACKeysIterator is an iterator over HMACKeys.
|
||||
|
@ -301,27 +260,13 @@ type HMACKeysIterator struct {
|
|||
//
|
||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator {
|
||||
it := &HMACKeysIterator{
|
||||
ctx: ctx,
|
||||
raw: raw.NewProjectsHmacKeysService(c.raw),
|
||||
projectID: projectID,
|
||||
retry: c.retry,
|
||||
}
|
||||
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(&it.desc)
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
it.fetch,
|
||||
func() int { return len(it.hmacKeys) - it.index },
|
||||
func() interface{} {
|
||||
prev := it.hmacKeys
|
||||
it.hmacKeys = it.hmacKeys[:0]
|
||||
it.index = 0
|
||||
return prev
|
||||
})
|
||||
return it
|
||||
o := makeStorageOpts(true, c.retry, desc.userProjectID)
|
||||
return c.tc.ListHMACKeys(ctx, projectID, desc.forServiceAccountEmail, desc.showDeletedKeys, o...)
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if
|
||||
|
@ -350,6 +295,8 @@ func (it *HMACKeysIterator) Next() (*HMACKey, error) {
|
|||
func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|
||||
|
||||
func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) {
|
||||
// TODO: Remove fetch method upon integration. This method is internalized into
|
||||
// httpStorageClient.ListHMACKeys() as it is the only caller.
|
||||
call := it.raw.List(it.projectID)
|
||||
setClientHeader(call.Header())
|
||||
if pageToken != "" {
|
||||
|
@ -379,10 +326,10 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string,
|
|||
}
|
||||
|
||||
for _, metadata := range resp.Items {
|
||||
hkPb := &raw.HmacKey{
|
||||
hk := &raw.HmacKey{
|
||||
Metadata: metadata,
|
||||
}
|
||||
hkey, err := pbHmacKeyToHMACKey(hkPb, true)
|
||||
hkey, err := toHMACKeyFromRaw(hk, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
333 vendor/cloud.google.com/go/storage/http_client.go generated vendored
@ -114,17 +114,17 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl
|
|||
// htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint.
|
||||
hc, ep, err := htransport.NewClient(ctx, s.clientOption...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("dialing: %v", err)
|
||||
return nil, fmt.Errorf("dialing: %w", err)
|
||||
}
|
||||
// RawService should be created with the chosen endpoint to take account of user override.
|
||||
rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storage client: %v", err)
|
||||
return nil, fmt.Errorf("storage client: %w", err)
|
||||
}
|
||||
// Update readHost and scheme with the chosen endpoint.
|
||||
u, err := url.Parse(ep)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("supplied endpoint %q is not valid: %v", ep, err)
|
||||
return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err)
|
||||
}
|
||||
|
||||
return &httpStorageClient{
|
||||
|
@ -159,7 +159,7 @@ func (c *httpStorageClient) GetServiceAccount(ctx context.Context, project strin
|
|||
return res.EmailAddress, nil
|
||||
}
|
||||
|
||||
func (c *httpStorageClient) CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
|
||||
func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
var bkt *raw.Bucket
|
||||
if attrs != nil {
|
||||
|
@ -167,7 +167,7 @@ func (c *httpStorageClient) CreateBucket(ctx context.Context, project string, at
|
|||
} else {
|
||||
bkt = &raw.Bucket{}
|
||||
}
|
||||
|
||||
bkt.Name = bucket
|
||||
// If there is lifecycle information but no location, explicitly set
|
||||
// the location. This is a GCS quirk/bug.
|
||||
if bkt.Location == "" && bkt.Lifecycle != nil {
|
||||
|
@ -344,8 +344,8 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
|
|||
req.EndOffset(it.query.EndOffset)
|
||||
req.Versions(it.query.Versions)
|
||||
req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter)
|
||||
if len(it.query.fieldSelection) > 0 {
|
||||
req.Fields("nextPageToken", googleapi.Field(it.query.fieldSelection))
|
||||
if selection := it.query.toFieldSelection(); selection != "" {
|
||||
req.Fields("nextPageToken", googleapi.Field(selection))
|
||||
}
|
||||
req.PageToken(pageToken)
|
||||
if s.userProject != "" {
|
||||
|
@ -548,8 +548,25 @@ func (c *httpStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket st
|
|||
}
|
||||
return toObjectACLRules(acls.Items), nil
|
||||
}
|
||||
func (c *httpStorageClient) UpdateDefaultObjectACL(ctx context.Context, opts ...storageOption) (*ACLRule, error) {
|
||||
return nil, errMethodNotSupported
|
||||
func (c *httpStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error {
|
||||
s := callSettings(c.settings, opts...)
|
||||
type setRequest interface {
|
||||
Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
|
||||
Header() http.Header
|
||||
}
|
||||
acl := &raw.ObjectAccessControl{
|
||||
Bucket: bucket,
|
||||
Entity: string(entity),
|
||||
Role: string(role),
|
||||
}
|
||||
var req setRequest
|
||||
var err error
|
||||
req = c.raw.DefaultObjectAccessControls.Update(bucket, string(entity), acl)
|
||||
configureACLCall(ctx, s.userProject, req)
|
||||
return run(ctx, func() error {
|
||||
_, err = req.Do()
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(req))
|
||||
}
|
||||
|
||||
// Bucket ACL methods.
|
||||
|
@ -577,7 +594,7 @@ func (c *httpStorageClient) ListBucketACLs(ctx context.Context, bucket string, o
|
|||
return toBucketACLRules(acls.Items), nil
|
||||
}
|
||||
|
||||
func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) {
|
||||
func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error {
|
||||
s := callSettings(c.settings, opts...)
|
||||
acl := &raw.BucketAccessControl{
|
||||
Bucket: bucket,
|
||||
|
@ -586,17 +603,11 @@ func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string,
|
|||
}
|
||||
req := c.raw.BucketAccessControls.Update(bucket, string(entity), acl)
|
||||
configureACLCall(ctx, s.userProject, req)
|
||||
var aclRule ACLRule
|
||||
var err error
|
||||
err = run(ctx, func() error {
|
||||
acl, err = req.Do()
|
||||
aclRule = toBucketACLRule(acl)
|
||||
return run(ctx, func() error {
|
||||
_, err = req.Do()
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &aclRule, nil
|
||||
}
|
||||
|
||||
// configureACLCall sets the context, user project and headers on the apiary library call.
|
||||
|
@ -613,7 +624,10 @@ func configureACLCall(ctx context.Context, userProject string, call interface{ H
|
|||
// Object ACL methods.
|
||||
|
||||
func (c *httpStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error {
|
||||
return errMethodNotSupported
|
||||
s := callSettings(c.settings, opts...)
|
||||
req := c.raw.ObjectAccessControls.Delete(bucket, object, string(entity))
|
||||
configureACLCall(ctx, s.userProject, req)
|
||||
return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req))
|
||||
}
|
||||
|
||||
// ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object.
|
||||
|
@ -634,17 +648,129 @@ func (c *httpStorageClient) ListObjectACLs(ctx context.Context, bucket, object s
|
|||
return toObjectACLRules(acls.Items), nil
|
||||
}
|
||||
|
||||
func (c *httpStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) {
|
||||
return nil, errMethodNotSupported
|
||||
func (c *httpStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error {
|
||||
s := callSettings(c.settings, opts...)
|
||||
type setRequest interface {
|
||||
Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
|
||||
Header() http.Header
|
||||
}
|
||||
|
||||
acl := &raw.ObjectAccessControl{
|
||||
Bucket: bucket,
|
||||
Entity: string(entity),
|
||||
Role: string(role),
|
||||
}
|
||||
var req setRequest
|
||||
var err error
|
||||
req = c.raw.ObjectAccessControls.Update(bucket, object, string(entity), acl)
|
||||
configureACLCall(ctx, s.userProject, req)
|
||||
return run(ctx, func() error {
|
||||
_, err = req.Do()
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(req))
|
||||
}
|
||||
|
||||
// Media operations.
|
||||
|
||||
func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) {
|
||||
return nil, errMethodNotSupported
|
||||
s := callSettings(c.settings, opts...)
|
||||
rawReq := &raw.ComposeRequest{}
|
||||
// Compose requires a non-empty Destination, so we always set it,
|
||||
// even if the caller-provided ObjectAttrs is the zero value.
|
||||
rawReq.Destination = req.dstObject.attrs.toRawObject(req.dstBucket)
|
||||
if req.sendCRC32C {
|
||||
rawReq.Destination.Crc32c = encodeUint32(req.dstObject.attrs.CRC32C)
|
||||
}
|
||||
for _, src := range req.srcs {
|
||||
srcObj := &raw.ComposeRequestSourceObjects{
|
||||
Name: src.name,
|
||||
}
|
||||
if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rawReq.SourceObjects = append(rawReq.SourceObjects, srcObj)
|
||||
}
|
||||
|
||||
call := c.raw.Objects.Compose(req.dstBucket, req.dstObject.name, rawReq).Context(ctx)
|
||||
if err := applyConds("ComposeFrom destination", defaultGen, req.dstObject.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if s.userProject != "" {
|
||||
call.UserProject(s.userProject)
|
||||
}
|
||||
if req.predefinedACL != "" {
|
||||
call.DestinationPredefinedAcl(req.predefinedACL)
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), req.dstObject.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var obj *raw.Object
|
||||
setClientHeader(call.Header())
|
||||
|
||||
var err error
|
||||
retryCall := func() error { obj, err = call.Do(); return err }
|
||||
|
||||
if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newObject(obj), nil
|
||||
}
|
||||
func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) {
|
||||
return nil, errMethodNotSupported
|
||||
s := callSettings(c.settings, opts...)
|
||||
rawObject := req.dstObject.attrs.toRawObject("")
|
||||
call := c.raw.Objects.Rewrite(req.srcObject.bucket, req.srcObject.name, req.dstObject.bucket, req.dstObject.name, rawObject)
|
||||
|
||||
call.Context(ctx).Projection("full")
|
||||
if req.token != "" {
|
||||
call.RewriteToken(req.token)
|
||||
}
|
||||
if req.dstObject.keyName != "" {
|
||||
call.DestinationKmsKeyName(req.dstObject.keyName)
|
||||
}
|
||||
if req.predefinedACL != "" {
|
||||
call.DestinationPredefinedAcl(req.predefinedACL)
|
||||
}
|
||||
if err := applyConds("Copy destination", defaultGen, req.dstObject.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := applySourceConds(req.srcObject.gen, req.srcObject.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if s.userProject != "" {
|
||||
call.UserProject(s.userProject)
|
||||
}
|
||||
// Set destination encryption headers.
|
||||
if err := setEncryptionHeaders(call.Header(), req.dstObject.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Set source encryption headers.
|
||||
if err := setEncryptionHeaders(call.Header(), req.srcObject.encryptionKey, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if req.maxBytesRewrittenPerCall != 0 {
|
||||
call.MaxBytesRewrittenPerCall(req.maxBytesRewrittenPerCall)
|
||||
}
|
||||
|
||||
var res *raw.RewriteResponse
|
||||
var err error
|
||||
setClientHeader(call.Header())
|
||||
|
||||
retryCall := func() error { res, err = call.Do(); return err }
|
||||
|
||||
if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r := &rewriteObjectResponse{
|
||||
done: res.Done,
|
||||
written: res.TotalBytesRewritten,
|
||||
size: res.ObjectSize,
|
||||
token: res.RewriteToken,
|
||||
resource: newObject(res.Resource),
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) {
|
||||
|
@ -653,14 +779,6 @@ func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRange
|
|||
|
||||
s := callSettings(c.settings, opts...)
|
||||
|
||||
if params.offset < 0 && params.length >= 0 {
|
||||
return nil, fmt.Errorf("storage: invalid offset %d < 0 requires negative length", params.offset)
|
||||
}
|
||||
if params.conds != nil {
|
||||
if err := params.conds.validate("NewRangeReader"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
u := &url.URL{
|
||||
Scheme: c.scheme,
|
||||
Host: c.readHost,
|
||||
|
@ -678,10 +796,9 @@ func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRange
|
|||
if s.userProject != "" {
|
||||
req.Header.Set("X-Goog-User-Project", s.userProject)
|
||||
}
|
||||
// TODO(noahdietz): add option for readCompressed.
|
||||
// if o.readCompressed {
|
||||
// req.Header.Set("Accept-Encoding", "gzip")
|
||||
// }
|
||||
if params.readCompressed {
|
||||
req.Header.Set("Accept-Encoding", "gzip")
|
||||
}
|
||||
if err := setEncryptionHeaders(req.Header, params.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -793,7 +910,7 @@ func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRange
|
|||
if dashIndex >= 0 {
|
||||
startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storage: invalid Content-Range %q: %v", cr, err)
|
||||
return nil, fmt.Errorf("storage: invalid Content-Range %q: %w", cr, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
@ -906,7 +1023,7 @@ func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storage
|
|||
return
|
||||
}
|
||||
var resp *raw.Object
|
||||
err := applyConds("NewWriter", params.attrs.Generation, params.conds, call)
|
||||
err := applyConds("NewWriter", defaultGen, params.conds, call)
|
||||
if err == nil {
|
||||
if s.userProject != "" {
|
||||
call.UserProject(s.userProject)
|
||||
|
@ -921,9 +1038,8 @@ func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storage
|
|||
// there is no need to add retries here.
|
||||
|
||||
// Retry only when the operation is idempotent or the retry policy is RetryAlways.
|
||||
isIdempotent := params.conds != nil && (params.conds.GenerationMatch >= 0 || params.conds.DoesNotExist == true)
|
||||
var useRetry bool
|
||||
if (s.retry == nil || s.retry.policy == RetryIdempotent) && isIdempotent {
|
||||
if (s.retry == nil || s.retry.policy == RetryIdempotent) && s.idempotent {
|
||||
useRetry = true
|
||||
} else if s.retry != nil && s.retry.policy == RetryAlways {
|
||||
useRetry = true
|
||||
|
@ -1006,20 +1122,139 @@ func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource str
|
|||
|
||||
// HMAC Key methods.
|
||||
|
||||
func (c *httpStorageClient) GetHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) {
|
||||
return nil, errMethodNotSupported
|
||||
func (c *httpStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
call := c.raw.Projects.HmacKeys.Get(project, accessID)
|
||||
if s.userProject != "" {
|
||||
call = call.UserProject(s.userProject)
|
||||
}
|
||||
|
||||
var metadata *raw.HmacKeyMetadata
|
||||
var err error
|
||||
if err := run(ctx, func() error {
|
||||
metadata, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hk := &raw.HmacKey{
|
||||
Metadata: metadata,
|
||||
}
|
||||
return toHMACKeyFromRaw(hk, false)
|
||||
}
|
||||
func (c *httpStorageClient) ListHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) *HMACKeysIterator {
|
||||
return &HMACKeysIterator{}
|
||||
|
||||
func (c *httpStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator {
|
||||
s := callSettings(c.settings, opts...)
|
||||
it := &HMACKeysIterator{
|
||||
ctx: ctx,
|
||||
raw: c.raw.Projects.HmacKeys,
|
||||
projectID: project,
|
||||
retry: s.retry,
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (token string, err error) {
|
||||
call := c.raw.Projects.HmacKeys.List(project)
|
||||
setClientHeader(call.Header())
|
||||
if pageToken != "" {
|
||||
call = call.PageToken(pageToken)
|
||||
}
|
||||
if pageSize > 0 {
|
||||
call = call.MaxResults(int64(pageSize))
|
||||
}
|
||||
if showDeletedKeys {
|
||||
call = call.ShowDeletedKeys(true)
|
||||
}
|
||||
if s.userProject != "" {
|
||||
call = call.UserProject(s.userProject)
|
||||
}
|
||||
if serviceAccountEmail != "" {
|
||||
call = call.ServiceAccountEmail(serviceAccountEmail)
|
||||
}
|
||||
|
||||
var resp *raw.HmacKeysMetadata
|
||||
err = run(it.ctx, func() error {
|
||||
resp, err = call.Context(it.ctx).Do()
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(call))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, metadata := range resp.Items {
|
||||
hk := &raw.HmacKey{
|
||||
Metadata: metadata,
|
||||
}
|
||||
hkey, err := toHMACKeyFromRaw(hk, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.hmacKeys = append(it.hmacKeys, hkey)
|
||||
}
|
||||
return resp.NextPageToken, nil
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
fetch,
|
||||
func() int { return len(it.hmacKeys) - it.index },
|
||||
func() interface{} {
|
||||
prev := it.hmacKeys
|
||||
it.hmacKeys = it.hmacKeys[:0]
|
||||
it.index = 0
|
||||
return prev
|
||||
})
|
||||
return it
|
||||
}
|
||||
func (c *httpStorageClient) UpdateHMACKey(ctx context.Context, desc *hmacKeyDesc, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) {
|
||||
return nil, errMethodNotSupported
|
||||
|
||||
func (c *httpStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
call := c.raw.Projects.HmacKeys.Update(project, accessID, &raw.HmacKeyMetadata{
|
||||
Etag: attrs.Etag,
|
||||
State: string(attrs.State),
|
||||
})
|
||||
if s.userProject != "" {
|
||||
call = call.UserProject(s.userProject)
|
||||
}
|
||||
|
||||
var metadata *raw.HmacKeyMetadata
|
||||
var err error
|
||||
if err := run(ctx, func() error {
|
||||
metadata, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hk := &raw.HmacKey{
|
||||
Metadata: metadata,
|
||||
}
|
||||
return toHMACKeyFromRaw(hk, false)
|
||||
}
|
||||
func (c *httpStorageClient) CreateHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) {
|
||||
return nil, errMethodNotSupported
|
||||
|
||||
func (c *httpStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
call := c.raw.Projects.HmacKeys.Create(project, serviceAccountEmail)
|
||||
if s.userProject != "" {
|
||||
call = call.UserProject(s.userProject)
|
||||
}
|
||||
|
||||
var hk *raw.HmacKey
|
||||
if err := run(ctx, func() error {
|
||||
h, err := call.Context(ctx).Do()
|
||||
hk = h
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toHMACKeyFromRaw(hk, true)
|
||||
}
|
||||
func (c *httpStorageClient) DeleteHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) error {
|
||||
return errMethodNotSupported
|
||||
|
||||
func (c *httpStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error {
|
||||
s := callSettings(c.settings, opts...)
|
||||
call := c.raw.Projects.HmacKeys.Delete(project, accessID)
|
||||
if s.userProject != "" {
|
||||
call = call.UserProject(s.userProject)
|
||||
}
|
||||
return run(ctx, func() error {
|
||||
return call.Context(ctx).Do()
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(call))
|
||||
}
|
||||
|
||||
// Notification methods.
|
||||
|
48 vendor/cloud.google.com/go/storage/iam.go generated vendored
@ -27,17 +27,17 @@ import (
|
|||
// IAM provides access to IAM access control for the bucket.
|
||||
func (b *BucketHandle) IAM() *iam.Handle {
|
||||
return iam.InternalNewHandleClient(&iamClient{
|
||||
raw: b.c.raw,
|
||||
userProject: b.userProject,
|
||||
retry: b.retry,
|
||||
client: b.c,
|
||||
}, b.name)
|
||||
}
|
||||
|
||||
// iamClient implements the iam.client interface.
|
||||
type iamClient struct {
|
||||
raw *raw.Service
|
||||
userProject string
|
||||
retry *retryConfig
|
||||
client *Client
|
||||
}
|
||||
|
||||
func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) {
|
||||
|
@ -48,57 +48,25 @@ func (c *iamClient) GetWithVersion(ctx context.Context, resource string, request
|
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(requestedPolicyVersion))
|
||||
setClientHeader(call.Header())
|
||||
if c.userProject != "" {
|
||||
call.UserProject(c.userProject)
|
||||
}
|
||||
var rp *raw.Policy
|
||||
err = run(ctx, func() error {
|
||||
rp, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, c.retry, true, setRetryHeaderHTTP(call))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return iamFromStoragePolicy(rp), nil
|
||||
o := makeStorageOpts(true, c.retry, c.userProject)
|
||||
return c.client.tc.GetIamPolicy(ctx, resource, requestedPolicyVersion, o...)
|
||||
}
|
||||
|
||||
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
rp := iamToStoragePolicy(p)
|
||||
call := c.raw.Buckets.SetIamPolicy(resource, rp)
|
||||
setClientHeader(call.Header())
|
||||
if c.userProject != "" {
|
||||
call.UserProject(c.userProject)
|
||||
}
|
||||
isIdempotent := len(p.Etag) > 0
|
||||
return run(ctx, func() error {
|
||||
_, err := call.Context(ctx).Do()
|
||||
return err
|
||||
}, c.retry, isIdempotent, setRetryHeaderHTTP(call))
|
||||
o := makeStorageOpts(isIdempotent, c.retry, c.userProject)
|
||||
return c.client.tc.SetIamPolicy(ctx, resource, p, o...)
|
||||
}
|
||||
|
||||
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
call := c.raw.Buckets.TestIamPermissions(resource, perms)
|
||||
setClientHeader(call.Header())
|
||||
if c.userProject != "" {
|
||||
call.UserProject(c.userProject)
|
||||
}
|
||||
var res *raw.TestIamPermissionsResponse
|
||||
err = run(ctx, func() error {
|
||||
res, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, c.retry, true, setRetryHeaderHTTP(call))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.Permissions, nil
|
||||
o := makeStorageOpts(true, c.retry, c.userProject)
|
||||
return c.client.tc.TestIamPermissions(ctx, resource, perms, o...)
|
||||
}
|
||||
|
||||
func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy {
|
||||
|
59 vendor/cloud.google.com/go/storage/internal/apiv2/doc.go generated vendored
@ -19,43 +19,54 @@
|
|||
//
|
||||
// Lets you store and retrieve potentially-large, immutable data objects.
|
||||
//
|
||||
// NOTE: This package is in alpha. It is not stable, and is likely to change.
|
||||
// NOTE: This package is in alpha. It is not stable, and is likely to change.
|
||||
//
|
||||
// Example usage
|
||||
// # Example usage
|
||||
//
|
||||
// To get started with this package, create a client.
|
||||
// ctx := context.Background()
|
||||
// c, err := storage.NewClient(ctx)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
// defer c.Close()
|
||||
//
|
||||
// ctx := context.Background()
|
||||
// // This snippet has been automatically generated and should be regarded as a code template only.
|
||||
// // It will require modifications to work:
|
||||
// // - It may require correct/in-range values for request initialization.
|
||||
// // - It may require specifying regional endpoints when creating the service client as shown in:
|
||||
// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
|
||||
// c, err := storage.NewClient(ctx)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
// defer c.Close()
|
||||
//
|
||||
// The client will use your default application credentials. Clients should be reused instead of created as needed.
|
||||
// The methods of Client are safe for concurrent use by multiple goroutines.
|
||||
// The returned client must be Closed when it is done being used.
|
||||
//
|
||||
// Using the Client
|
||||
// # Using the Client
|
||||
//
|
||||
// The following is an example of making an API call with the newly created client.
|
||||
//
|
||||
// ctx := context.Background()
|
||||
// c, err := storage.NewClient(ctx)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
// defer c.Close()
|
||||
// ctx := context.Background()
|
||||
// // This snippet has been automatically generated and should be regarded as a code template only.
|
||||
// // It will require modifications to work:
|
||||
// // - It may require correct/in-range values for request initialization.
|
||||
// // - It may require specifying regional endpoints when creating the service client as shown in:
|
||||
// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
|
||||
// c, err := storage.NewClient(ctx)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
// defer c.Close()
|
||||
//
|
||||
// req := &storagepb.DeleteBucketRequest{
|
||||
// // TODO: Fill request struct fields.
|
||||
// // See https://pkg.go.dev/google.golang.org/genproto/googleapis/storage/v2#DeleteBucketRequest.
|
||||
// }
|
||||
// err = c.DeleteBucket(ctx, req)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
// req := &storagepb.DeleteBucketRequest{
|
||||
// // TODO: Fill request struct fields.
|
||||
// // See https://pkg.go.dev/cloud.google.com/go/storage/internal/apiv2/stubs#DeleteBucketRequest.
|
||||
// }
|
||||
// err = c.DeleteBucket(ctx, req)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
//
|
||||
// Use of Context
|
||||
// # Use of Context
|
||||
//
|
||||
// The ctx passed to NewClient is used for authentication requests and
|
||||
// for creating the underlying connection, but is not used for subsequent calls.
|
||||
|
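A minimal sketch of the "Use of Context" guidance above, assuming only the generated client surface shown in this file (NewClient, Close, DeleteBucket); it is not part of the vendored file, and the 30-second timeout is an arbitrary example value. The context passed to NewClient covers authentication and connection setup, while each RPC takes its own, possibly deadline-bound, context.

ctx := context.Background()
c, err := storage.NewClient(ctx)
if err != nil {
	// TODO: Handle error.
}
defer c.Close()

// Each call carries its own context; a deadline here does not affect the
// client's underlying connection.
callCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
if err := c.DeleteBucket(callCtx, &storagepb.DeleteBucketRequest{
	// TODO: Fill request struct fields as in the example above.
}); err != nil {
	// TODO: Handle error.
}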
5 vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json generated vendored
@ -10,6 +10,11 @@
|
|||
"grpc": {
|
||||
"libraryClient": "Client",
|
||||
"rpcs": {
|
||||
"CancelResumableWrite": {
|
||||
"methods": [
|
||||
"CancelResumableWrite"
|
||||
]
|
||||
},
|
||||
"ComposeObject": {
|
||||
"methods": [
|
||||
"ComposeObject"
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2021 Google LLC
|
||||
// Copyright 2022 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -12,20 +12,15 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package adapters
|
||||
package storage
|
||||
|
||||
import (
|
||||
"time"
|
||||
"context"
|
||||
|
||||
mpb "google.golang.org/genproto/googleapis/type/month"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// ToMonth converts a google.type.Month to a golang Month.
|
||||
func ToMonth(m mpb.Month) time.Month {
|
||||
return time.Month(m.Number())
|
||||
}
|
||||
|
||||
// ToProtoMonth converts a golang Month to a google.type.Month.
|
||||
func ToProtoMonth(m time.Month) mpb.Month {
|
||||
return mpb.Month(m)
|
||||
// InsertMetadata inserts the given gRPC metadata into the outgoing context.
|
||||
func InsertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
|
||||
return insertMetadata(ctx, mds...)
|
||||
}
533 vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go generated vendored
@ -18,15 +18,19 @@ package storage
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
||||
gax "github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
gtransport "google.golang.org/api/transport/grpc"
|
||||
iampb "google.golang.org/genproto/googleapis/iam/v1"
|
||||
storagepb "google.golang.org/genproto/googleapis/storage/v2"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
@ -51,6 +55,7 @@ type CallOptions struct {
|
|||
ListNotifications []gax.CallOption
|
||||
ComposeObject []gax.CallOption
|
||||
DeleteObject []gax.CallOption
|
||||
CancelResumableWrite []gax.CallOption
|
||||
GetObject []gax.CallOption
|
||||
ReadObject []gax.CallOption
|
||||
UpdateObject []gax.CallOption
|
||||
|
@ -96,6 +101,7 @@ func defaultCallOptions() *CallOptions {
|
|||
ListNotifications: []gax.CallOption{},
|
||||
ComposeObject: []gax.CallOption{},
|
||||
DeleteObject: []gax.CallOption{},
|
||||
CancelResumableWrite: []gax.CallOption{},
|
||||
GetObject: []gax.CallOption{},
|
||||
ReadObject: []gax.CallOption{},
|
||||
UpdateObject: []gax.CallOption{},
|
||||
|
@ -133,6 +139,7 @@ type internalClient interface {
|
|||
ListNotifications(context.Context, *storagepb.ListNotificationsRequest, ...gax.CallOption) *NotificationIterator
|
||||
ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
|
||||
DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error
|
||||
CancelResumableWrite(context.Context, *storagepb.CancelResumableWriteRequest, ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error)
|
||||
GetObject(context.Context, *storagepb.GetObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
|
||||
ReadObject(context.Context, *storagepb.ReadObjectRequest, ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error)
|
||||
UpdateObject(context.Context, *storagepb.UpdateObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
|
||||
|
@ -158,22 +165,22 @@ type internalClient interface {
|
|||
//
|
||||
// Resources are named as follows:
|
||||
//
|
||||
// Projects are referred to as they are defined by the Resource Manager API,
|
||||
// using strings like projects/123456 or projects/my-string-id.
|
||||
// Projects are referred to as they are defined by the Resource Manager API,
|
||||
// using strings like projects/123456 or projects/my-string-id.
|
||||
//
|
||||
// Buckets are named using string names of the form:
|
||||
// projects/{project}/buckets/{bucket}
|
||||
// For globally unique buckets, _ may be substituted for the project.
|
||||
// Buckets are named using string names of the form:
|
||||
// projects/{project}/buckets/{bucket}
|
||||
// For globally unique buckets, _ may be substituted for the project.
|
||||
//
|
||||
// Objects are uniquely identified by their name along with the name of the
|
||||
// bucket they belong to, as separate strings in this API. For example:
|
||||
// Objects are uniquely identified by their name along with the name of the
|
||||
// bucket they belong to, as separate strings in this API. For example:
|
||||
//
|
||||
// ReadObjectRequest {
|
||||
// bucket: ‘projects/_/buckets/my-bucket’
|
||||
// object: ‘my-object’
|
||||
// }
|
||||
// Note that object names can contain / characters, which are treated as
|
||||
// any other character (no special directory semantics).
|
||||
// ReadObjectRequest {
|
||||
// bucket: ‘projects/_/buckets/my-bucket’
|
||||
// object: ‘my-object’
|
||||
// }
|
||||
// Note that object names can contain / characters, which are treated as
|
||||
// any other character (no special directory semantics).
|
||||
type Client struct {
|
||||
// The internal transport-dependent client.
|
||||
internalClient internalClient
|
||||
|
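As a small illustration of the resource naming scheme documented above (a sketch, not part of the vendored code): a ReadObjectRequest addresses an object by its bucket resource name plus the object name, with "_" standing in for the project on globally unique buckets.

req := &storagepb.ReadObjectRequest{
	Bucket: "projects/_/buckets/my-bucket", // bucket resource name; "_" substitutes for the project
	Object: "my-object",                    // object names may themselves contain "/"
}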
@ -199,7 +206,8 @@ func (c *Client) setGoogleClientInfo(keyval ...string) {
|
|||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated.
|
||||
// Deprecated: Connections are now pooled so this method does not always
|
||||
// return the same resource.
|
||||
func (c *Client) Connection() *grpc.ClientConn {
|
||||
return c.internalClient.Connection()
|
||||
}
|
||||
|
@ -229,17 +237,17 @@ func (c *Client) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.L
|
|||
return c.internalClient.LockBucketRetentionPolicy(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// GetIamPolicy gets the IAM policy for a specified bucket.
|
||||
// GetIamPolicy gets the IAM policy for a specified bucket or object.
|
||||
func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
|
||||
return c.internalClient.GetIamPolicy(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// SetIamPolicy updates an IAM policy for the specified bucket.
|
||||
// SetIamPolicy updates an IAM policy for the specified bucket or object.
|
||||
func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
|
||||
return c.internalClient.SetIamPolicy(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// TestIamPermissions tests a set of permissions on the given bucket to see which, if
|
||||
// TestIamPermissions tests a set of permissions on the given bucket or object to see which, if
|
||||
// any, are held by the caller.
|
||||
func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
|
||||
return c.internalClient.TestIamPermissions(ctx, req, opts...)
|
||||
|
@ -280,12 +288,16 @@ func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObject
|
|||
}
|
||||
|
||||
// DeleteObject deletes an object and its metadata. Deletions are permanent if versioning
|
||||
// is not enabled for the bucket, or if the generation parameter
|
||||
// is used.
|
||||
// is not enabled for the bucket, or if the generation parameter is used.
|
||||
func (c *Client) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error {
|
||||
return c.internalClient.DeleteObject(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// CancelResumableWrite cancels an in-progress resumable upload.
|
||||
func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) {
|
||||
return c.internalClient.CancelResumableWrite(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// GetObject retrieves an object’s metadata.
|
||||
func (c *Client) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
|
||||
return c.internalClient.GetObject(ctx, req, opts...)
|
||||
|
@ -319,33 +331,33 @@ func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRe
|
|||
// error or an error response from the server), the client should do as
|
||||
// follows:
|
||||
//
|
||||
// Check the result Status of the stream, to determine if writing can be
|
||||
// resumed on this stream or must be restarted from scratch (by calling
|
||||
// StartResumableWrite()). The resumable errors are DEADLINE_EXCEEDED,
|
||||
// INTERNAL, and UNAVAILABLE. For each case, the client should use binary
|
||||
// exponential backoff before retrying. Additionally, writes can be
|
||||
// resumed after RESOURCE_EXHAUSTED errors, but only after taking
|
||||
// appropriate measures, which may include reducing aggregate send rate
|
||||
// across clients and/or requesting a quota increase for your project.
|
||||
// Check the result Status of the stream, to determine if writing can be
|
||||
// resumed on this stream or must be restarted from scratch (by calling
|
||||
// StartResumableWrite()). The resumable errors are DEADLINE_EXCEEDED,
|
||||
// INTERNAL, and UNAVAILABLE. For each case, the client should use binary
|
||||
// exponential backoff before retrying. Additionally, writes can be
|
||||
// resumed after RESOURCE_EXHAUSTED errors, but only after taking
|
||||
// appropriate measures, which may include reducing aggregate send rate
|
||||
// across clients and/or requesting a quota increase for your project.
|
||||
//
|
||||
// If the call to WriteObject returns ABORTED, that indicates
|
||||
// concurrent attempts to update the resumable write, caused either by
|
||||
// multiple racing clients or by a single client where the previous
|
||||
// request was timed out on the client side but nonetheless reached the
|
||||
// server. In this case the client should take steps to prevent further
|
||||
// concurrent writes (e.g., increase the timeouts, stop using more than
|
||||
// one process to perform the upload, etc.), and then should follow the
|
||||
// steps below for resuming the upload.
|
||||
// If the call to WriteObject returns ABORTED, that indicates
|
||||
// concurrent attempts to update the resumable write, caused either by
|
||||
// multiple racing clients or by a single client where the previous
|
||||
// request was timed out on the client side but nonetheless reached the
|
||||
// server. In this case the client should take steps to prevent further
|
||||
// concurrent writes (e.g., increase the timeouts, stop using more than
|
||||
// one process to perform the upload, etc.), and then should follow the
|
||||
// steps below for resuming the upload.
|
||||
//
|
||||
// For resumable errors, the client should call QueryWriteStatus() and
|
||||
// then continue writing from the returned persisted_size. This may be
|
||||
// less than the amount of data the client previously sent. Note also that
|
||||
// it is acceptable to send data starting at an offset earlier than the
|
||||
// returned persisted_size; in this case, the service will skip data at
|
||||
// offsets that were already persisted (without checking that it matches
|
||||
// the previously written data), and write only the data starting from the
|
||||
// persisted offset. This behavior can make client-side handling simpler
|
||||
// in some cases.
|
||||
// For resumable errors, the client should call QueryWriteStatus() and
|
||||
// then continue writing from the returned persisted_size. This may be
|
||||
// less than the amount of data the client previously sent. Note also that
|
||||
// it is acceptable to send data starting at an offset earlier than the
|
||||
// returned persisted_size; in this case, the service will skip data at
|
||||
// offsets that were already persisted (without checking that it matches
|
||||
// the previously written data), and write only the data starting from the
|
||||
// persisted offset. This behavior can make client-side handling simpler
|
||||
// in some cases.
|
||||
//
|
||||
// The service will not view the object as complete until the client has
|
||||
// sent a WriteObjectRequest with finish_write set to true. Sending any
|
||||
|
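A sketch of the resume step the WriteObject comment above prescribes, assuming the gapic Client in this file exposes QueryWriteStatus with the usual generated signature (request in, response out, variadic call options); it is not part of the vendored code. After a retryable error, the caller asks for the persisted size and continues writing from that offset.

func persistedOffset(ctx context.Context, c *Client, uploadID string) (int64, error) {
	// Ask the service how much of the resumable write has been persisted.
	resp, err := c.QueryWriteStatus(ctx, &storagepb.QueryWriteStatusRequest{UploadId: uploadID})
	if err != nil {
		return 0, err
	}
	// Writing may resume from this offset (or earlier; bytes already
	// persisted are skipped by the service).
	return resp.GetPersistedSize(), nil
}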
@ -455,22 +467,22 @@ type gRPCClient struct {
|
|||
//
|
||||
// Resources are named as follows:
|
||||
//
|
||||
// Projects are referred to as they are defined by the Resource Manager API,
|
||||
// using strings like projects/123456 or projects/my-string-id.
|
||||
// Projects are referred to as they are defined by the Resource Manager API,
|
||||
// using strings like projects/123456 or projects/my-string-id.
|
||||
//
|
||||
// Buckets are named using string names of the form:
|
||||
// projects/{project}/buckets/{bucket}
|
||||
// For globally unique buckets, _ may be substituted for the project.
|
||||
// Buckets are named using string names of the form:
|
||||
// projects/{project}/buckets/{bucket}
|
||||
// For globally unique buckets, _ may be substituted for the project.
|
||||
//
|
||||
// Objects are uniquely identified by their name along with the name of the
|
||||
// bucket they belong to, as separate strings in this API. For example:
|
||||
// Objects are uniquely identified by their name along with the name of the
|
||||
// bucket they belong to, as separate strings in this API. For example:
|
||||
//
|
||||
// ReadObjectRequest {
|
||||
// bucket: ‘projects/_/buckets/my-bucket’
|
||||
// object: ‘my-object’
|
||||
// }
|
||||
// Note that object names can contain / characters, which are treated as
|
||||
// any other character (no special directory semantics).
|
||||
// ReadObjectRequest {
|
||||
// bucket: ‘projects/_/buckets/my-bucket’
|
||||
// object: ‘my-object’
|
||||
// }
|
||||
// Note that object names can contain / characters, which are treated as
|
||||
// any other character (no special directory semantics).
|
||||
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
|
||||
clientOpts := defaultGRPCClientOptions()
|
||||
if newClientHook != nil {
|
||||
|
@ -507,7 +519,8 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
|
|||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated.
|
||||
// Deprecated: Connections are now pooled so this method does not always
|
||||
// return the same resource.
|
||||
func (c *gRPCClient) Connection() *grpc.ClientConn {
|
||||
return c.connPool.Conn()
|
||||
}
|
||||
|
@ -528,7 +541,18 @@ func (c *gRPCClient) Close() error {
|
|||
}
|
||||
|
||||
func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBucketRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).DeleteBucket[0:len((*c.CallOptions).DeleteBucket):len((*c.CallOptions).DeleteBucket)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
|
@ -539,7 +563,18 @@ func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBuck
|
|||
}
|
||||
|
||||
func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).GetBucket[0:len((*c.CallOptions).GetBucket):len((*c.CallOptions).GetBucket)], opts...)
|
||||
var resp *storagepb.Bucket
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -554,7 +589,18 @@ func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequ
|
|||
}
|
||||
|
||||
func (c *gRPCClient) CreateBucket(ctx context.Context, req *storagepb.CreateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<project>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
|
||||
routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).CreateBucket[0:len((*c.CallOptions).CreateBucket):len((*c.CallOptions).CreateBucket)], opts...)
|
||||
var resp *storagepb.Bucket
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -569,7 +615,18 @@ func (c *gRPCClient) CreateBucket(ctx context.Context, req *storagepb.CreateBuck
|
|||
}
|
||||
|
||||
func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBucketsRequest, opts ...gax.CallOption) *BucketIterator {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<project>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
|
||||
routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).ListBuckets[0:len((*c.CallOptions).ListBuckets):len((*c.CallOptions).ListBuckets)], opts...)
|
||||
it := &BucketIterator{}
|
||||
req = proto.Clone(req).(*storagepb.ListBucketsRequest)
|
||||
|
@ -612,7 +669,18 @@ func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBuckets
|
|||
}
|
||||
|
||||
func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.LockBucketRetentionPolicyRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).LockBucketRetentionPolicy[0:len((*c.CallOptions).LockBucketRetentionPolicy):len((*c.CallOptions).LockBucketRetentionPolicy)], opts...)
|
||||
var resp *storagepb.Bucket
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -627,7 +695,21 @@ func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storage
|
|||
}
|
||||
|
||||
func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
|
||||
}
|
||||
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
|
||||
var resp *iampb.Policy
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -642,7 +724,21 @@ func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRe
|
|||
}
|
||||
|
||||
func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
|
||||
}
|
||||
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
|
||||
var resp *iampb.Policy
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -657,7 +753,21 @@ func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRe
|
|||
}
|
||||
|
||||
func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
|
||||
}
|
||||
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
|
||||
var resp *iampb.TestIamPermissionsResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -672,7 +782,18 @@ func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP
|
|||
}
|
||||
|
||||
func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket().GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetName())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetName())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).UpdateBucket[0:len((*c.CallOptions).UpdateBucket):len((*c.CallOptions).UpdateBucket)], opts...)
|
||||
var resp *storagepb.Bucket
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -687,7 +808,18 @@ func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBuck
|
|||
}
|
||||
|
||||
func (c *gRPCClient) DeleteNotification(ctx context.Context, req *storagepb.DeleteNotificationRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).DeleteNotification[0:len((*c.CallOptions).DeleteNotification):len((*c.CallOptions).DeleteNotification)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
|
@ -698,7 +830,18 @@ func (c *gRPCClient) DeleteNotification(ctx context.Context, req *storagepb.Dele
|
|||
}
|
||||
|
||||
func (c *gRPCClient) GetNotification(ctx context.Context, req *storagepb.GetNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).GetNotification[0:len((*c.CallOptions).GetNotification):len((*c.CallOptions).GetNotification)], opts...)
|
||||
var resp *storagepb.Notification
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -713,7 +856,18 @@ func (c *gRPCClient) GetNotification(ctx context.Context, req *storagepb.GetNoti
|
|||
}
|
||||
|
||||
func (c *gRPCClient) CreateNotification(ctx context.Context, req *storagepb.CreateNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).CreateNotification[0:len((*c.CallOptions).CreateNotification):len((*c.CallOptions).CreateNotification)], opts...)
|
||||
var resp *storagepb.Notification
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -728,7 +882,18 @@ func (c *gRPCClient) CreateNotification(ctx context.Context, req *storagepb.Crea
|
|||
}
|
||||
|
||||
func (c *gRPCClient) ListNotifications(ctx context.Context, req *storagepb.ListNotificationsRequest, opts ...gax.CallOption) *NotificationIterator {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).ListNotifications[0:len((*c.CallOptions).ListNotifications):len((*c.CallOptions).ListNotifications)], opts...)
|
||||
it := &NotificationIterator{}
|
||||
req = proto.Clone(req).(*storagepb.ListNotificationsRequest)
|
||||
|
@ -771,7 +936,18 @@ func (c *gRPCClient) ListNotifications(ctx context.Context, req *storagepb.ListN
|
|||
}
|
||||
|
||||
func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetDestination().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetDestination().GetBucket())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetDestination().GetBucket())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).ComposeObject[0:len((*c.CallOptions).ComposeObject):len((*c.CallOptions).ComposeObject)], opts...)
|
||||
var resp *storagepb.Object
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -786,7 +962,18 @@ func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeOb
|
|||
}
|
||||
|
||||
func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).DeleteObject[0:len((*c.CallOptions).DeleteObject):len((*c.CallOptions).DeleteObject)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
|
@ -796,8 +983,45 @@ func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObje
|
|||
return err
|
||||
}
|
||||
|
||||
func (c *gRPCClient) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) {
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetUploadId()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).CancelResumableWrite[0:len((*c.CallOptions).CancelResumableWrite):len((*c.CallOptions).CancelResumableWrite)], opts...)
|
||||
var resp *storagepb.CancelResumableWriteResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.CancelResumableWrite(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).GetObject[0:len((*c.CallOptions).GetObject):len((*c.CallOptions).GetObject)], opts...)
|
||||
var resp *storagepb.Object
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -812,7 +1036,18 @@ func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequ
|
|||
}
|
||||
|
||||
func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
var resp storagepb.Storage_ReadObjectClient
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
|
@ -826,7 +1061,18 @@ func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRe
|
|||
}
|
||||
|
||||
func (c *gRPCClient) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetObject().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetObject().GetBucket())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetObject().GetBucket())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).UpdateObject[0:len((*c.CallOptions).UpdateObject):len((*c.CallOptions).UpdateObject)], opts...)
|
||||
var resp *storagepb.Object
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -856,7 +1102,18 @@ func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (s
|
|||
}
|
||||
|
||||
func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).ListObjects[0:len((*c.CallOptions).ListObjects):len((*c.CallOptions).ListObjects)], opts...)
|
||||
it := &ObjectIterator{}
|
||||
req = proto.Clone(req).(*storagepb.ListObjectsRequest)
|
||||
|
@ -899,7 +1156,21 @@ func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjects
|
|||
}
|
||||
|
||||
func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteObjectRequest, opts ...gax.CallOption) (*storagepb.RewriteResponse, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetSourceBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetSourceBucket())[1])) > 0 {
|
||||
routingHeadersMap["source_bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetSourceBucket())[1])
|
||||
}
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetDestinationBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetDestinationBucket())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetDestinationBucket())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).RewriteObject[0:len((*c.CallOptions).RewriteObject):len((*c.CallOptions).RewriteObject)], opts...)
|
||||
var resp *storagepb.RewriteResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -914,7 +1185,18 @@ func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteOb
|
|||
}
|
||||
|
||||
func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetWriteObjectSpec().GetResource().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetWriteObjectSpec().GetResource().GetBucket())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetWriteObjectSpec().GetResource().GetBucket())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).StartResumableWrite[0:len((*c.CallOptions).StartResumableWrite):len((*c.CallOptions).StartResumableWrite)], opts...)
|
||||
var resp *storagepb.StartResumableWriteResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -929,7 +1211,18 @@ func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.Sta
|
|||
}
|
||||
|
||||
func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetUploadId()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).QueryWriteStatus[0:len((*c.CallOptions).QueryWriteStatus):len((*c.CallOptions).QueryWriteStatus)], opts...)
|
||||
var resp *storagepb.QueryWriteStatusResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -944,7 +1237,18 @@ func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryW
|
|||
}
|
||||
|
||||
func (c *gRPCClient) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
|
||||
routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).GetServiceAccount[0:len((*c.CallOptions).GetServiceAccount):len((*c.CallOptions).GetServiceAccount)], opts...)
|
||||
var resp *storagepb.ServiceAccount
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -959,7 +1263,18 @@ func (c *gRPCClient) GetServiceAccount(ctx context.Context, req *storagepb.GetSe
|
|||
}
|
||||
|
||||
func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
|
||||
routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).CreateHmacKey[0:len((*c.CallOptions).CreateHmacKey):len((*c.CallOptions).CreateHmacKey)], opts...)
|
||||
var resp *storagepb.CreateHmacKeyResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -974,7 +1289,18 @@ func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHma
|
|||
}
|
||||
|
||||
func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
|
||||
routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).DeleteHmacKey[0:len((*c.CallOptions).DeleteHmacKey):len((*c.CallOptions).DeleteHmacKey)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
|
@ -985,7 +1311,18 @@ func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHma
|
|||
}
|
||||
|
||||
func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
|
||||
routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).GetHmacKey[0:len((*c.CallOptions).GetHmacKey):len((*c.CallOptions).GetHmacKey)], opts...)
|
||||
var resp *storagepb.HmacKeyMetadata
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -1000,7 +1337,18 @@ func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRe
|
|||
}
|
||||
|
||||
func (c *gRPCClient) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
|
||||
routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).ListHmacKeys[0:len((*c.CallOptions).ListHmacKeys):len((*c.CallOptions).ListHmacKeys)], opts...)
|
||||
it := &HmacKeyMetadataIterator{}
|
||||
req = proto.Clone(req).(*storagepb.ListHmacKeysRequest)
|
||||
|
@ -1043,7 +1391,18 @@ func (c *gRPCClient) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKe
|
|||
}
|
||||
|
||||
func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<project>.*)"); reg.MatchString(req.GetHmacKey().GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])) > 0 {
|
||||
routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).UpdateHmacKey[0:len((*c.CallOptions).UpdateHmacKey):len((*c.CallOptions).UpdateHmacKey)], opts...)
|
||||
var resp *storagepb.HmacKeyMetadata
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

File diff suppressed because it is too large
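Aside (not part of the vendored diff): every generated method in the gRPC client above repeats the same routing-header construction before invoking the RPC: match the routing field with a regexp, URL-escape the capture, and join the results into a single x-goog-request-params metadata value. The standalone sketch below condenses that pattern; buildRoutingHeader and the sample bucket value are made up for illustration and do not exist in the library.

package main

import (
	"fmt"
	"net/url"
	"regexp"
	"strings"

	"google.golang.org/grpc/metadata"
)

// buildRoutingHeader is a hypothetical helper mirroring the generated pattern:
// for each routing field, extract the capture group, URL-escape it, and join
// all non-empty results into one x-goog-request-params metadata value.
func buildRoutingHeader(fields map[string]*regexp.Regexp, values map[string]string) metadata.MD {
	routingHeadersMap := make(map[string]string)
	for name, re := range fields {
		v := values[name]
		if re.MatchString(v) && len(url.QueryEscape(re.FindStringSubmatch(v)[1])) > 0 {
			routingHeadersMap[name] = url.QueryEscape(re.FindStringSubmatch(v)[1])
		}
	}
	routingHeaders := ""
	for headerName, headerValue := range routingHeadersMap {
		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
	}
	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
	return metadata.Pairs("x-goog-request-params", routingHeaders)
}

func main() {
	md := buildRoutingHeader(
		map[string]*regexp.Regexp{"bucket": regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?")},
		map[string]string{"bucket": "projects/_/buckets/my-bucket/notificationConfigs/123"},
	)
	// Prints: map[x-goog-request-params:[bucket=projects%2F_%2Fbuckets%2Fmy-bucket]]
	fmt.Println(md)
}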
2  vendor/cloud.google.com/go/storage/internal/version.go  generated vendored
@@ -15,4 +15,4 @@
 package internal
 
 // Version is the current tagged release of the library.
-const Version = "1.23.0"
+const Version = "1.28.0"
15  vendor/cloud.google.com/go/storage/invoke.go  generated vendored
@@ -57,7 +57,7 @@ func run(ctx context.Context, call func() error, retry *retryConfig, isIdempoten
 		bo.Initial = retry.backoff.Initial
 		bo.Max = retry.backoff.Max
 	}
-	var errorFunc func(err error) bool = shouldRetry
+	var errorFunc func(err error) bool = ShouldRetry
 	if retry.shouldRetry != nil {
 		errorFunc = retry.shouldRetry
 	}
@@ -89,7 +89,16 @@ func setRetryHeaderGRPC(_ context.Context) func(string, int) {
 	}
 }
 
-func shouldRetry(err error) bool {
+// ShouldRetry returns true if an error is retryable, based on best practice
+// guidance from GCS. See
+// https://cloud.google.com/storage/docs/retry-strategy#go for more information
+// on what errors are considered retryable.
+//
+// If you would like to customize retryable errors, use the WithErrorFunc to
+// supply a RetryOption to your library calls. For example, to retry additional
+// errors, you can write a custom func that wraps ShouldRetry and also specifies
+// additional errors that should return true.
+func ShouldRetry(err error) bool {
 	if err == nil {
 		return false
 	}
@@ -131,7 +140,7 @@ func shouldRetry(err error) bool {
 	}
 	// Unwrap is only supported in go1.13.x+
 	if e, ok := err.(interface{ Unwrap() error }); ok {
-		return shouldRetry(e.Unwrap())
+		return ShouldRetry(e.Unwrap())
 	}
 	return false
 }
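Aside (not part of the vendored diff): the newly exported ShouldRetry, combined with the existing WithErrorFunc RetryOption, enables exactly the customization the new doc comment describes. A minimal sketch, assuming default credentials are available; the bucket/object names and the errFlaky sentinel are placeholders.

package main

import (
	"context"
	"errors"
	"log"

	"cloud.google.com/go/storage"
)

// errFlaky is a stand-in for an application-specific error we also want retried.
var errFlaky = errors.New("flaky backend")

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Wrap the library's default classification and retry one extra error kind.
	obj := client.Bucket("my-bucket").Object("my-object").Retryer(
		storage.WithErrorFunc(func(err error) bool {
			return storage.ShouldRetry(err) || errors.Is(err, errFlaky)
		}),
	)
	_ = obj // subsequent reads/writes on obj use the custom retry predicate
}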
46  vendor/cloud.google.com/go/storage/notifications.go  generated vendored
@@ -21,8 +21,8 @@ import (
 	"regexp"
 
 	"cloud.google.com/go/internal/trace"
+	storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
 	raw "google.golang.org/api/storage/v1"
-	storagepb "google.golang.org/genproto/googleapis/storage/v2"
 )
 
 // A Notification describes how to send Cloud PubSub messages when certain
@@ -157,21 +157,10 @@ func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (re
 	if n.TopicID == "" {
 		return nil, errors.New("storage: AddNotification: missing TopicID")
 	}
-	call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n))
-	setClientHeader(call.Header())
-	if b.userProject != "" {
-		call.UserProject(b.userProject)
-	}
-
-	var rn *raw.Notification
-	err = run(ctx, func() error {
-		rn, err = call.Context(ctx).Do()
-		return err
-	}, b.retry, false, setRetryHeaderHTTP(call))
-	if err != nil {
-		return nil, err
-	}
-	return toNotification(rn), nil
+	opts := makeStorageOpts(false, b.retry, b.userProject)
+	ret, err = b.c.tc.CreateNotification(ctx, b.name, n, opts...)
+	return ret, err
 }
 
// Notifications returns all the Notifications configured for this bucket, as a map
@@ -180,20 +169,9 @@ func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notific
 	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
 	defer func() { trace.EndSpan(ctx, err) }()
 
-	call := b.c.raw.Notifications.List(b.name)
-	setClientHeader(call.Header())
-	if b.userProject != "" {
-		call.UserProject(b.userProject)
-	}
-	var res *raw.Notifications
-	err = run(ctx, func() error {
-		res, err = call.Context(ctx).Do()
-		return err
-	}, b.retry, true, setRetryHeaderHTTP(call))
-	if err != nil {
-		return nil, err
-	}
-	return notificationsToMap(res.Items), nil
+	opts := makeStorageOpts(true, b.retry, b.userProject)
+	n, err = b.c.tc.ListNotifications(ctx, b.name, opts...)
+	return n, err
 }
 
 func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
@@ -217,12 +195,6 @@ func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err e
 	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
 	defer func() { trace.EndSpan(ctx, err) }()
 
-	call := b.c.raw.Notifications.Delete(b.name, id)
-	setClientHeader(call.Header())
-	if b.userProject != "" {
-		call.UserProject(b.userProject)
-	}
-	return run(ctx, func() error {
-		return call.Context(ctx).Do()
-	}, b.retry, true, setRetryHeaderHTTP(call))
+	opts := makeStorageOpts(true, b.retry, b.userProject)
+	return b.c.tc.DeleteNotification(ctx, b.name, id, opts...)
 }
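Aside (not part of the vendored diff): AddNotification, Notifications and DeleteNotification now delegate to the transport-agnostic client (b.c.tc), but the caller-facing API is unchanged. A minimal usage sketch; project, topic and bucket names are placeholders.

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bucket := client.Bucket("my-bucket")

	// Route change notifications for the bucket to a Pub/Sub topic.
	n, err := bucket.AddNotification(ctx, &storage.Notification{
		TopicProjectID: "my-project",
		TopicID:        "my-topic",
		PayloadFormat:  storage.JSONPayload,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer bucket.DeleteNotification(ctx, n.ID) // clean up by notification ID
}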
2  vendor/cloud.google.com/go/storage/post_policy_v4.go  generated vendored
@@ -340,7 +340,7 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options
 		"expiration": opts.Expires.Format(time.RFC3339),
 	})
 	if err != nil {
-		return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %v", err)
+		return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %w", err)
 	}
 
 	b64Policy := base64.StdEncoding.EncodeToString(condsAsJSON)
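Aside (not part of the vendored diff): the switch from %v to %w means the underlying serialization error is wrapped rather than flattened into text, so callers can match it with errors.Is/errors.As. A self-contained sketch of the difference, using io.ErrUnexpectedEOF purely as a stand-in sentinel:

package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	// With %v the original error is flattened into text; errors.Is cannot match it.
	flat := fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %v", io.ErrUnexpectedEOF)
	fmt.Println(errors.Is(flat, io.ErrUnexpectedEOF)) // false

	// With %w the original error is wrapped and still matchable by callers.
	wrapped := fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %w", io.ErrUnexpectedEOF)
	fmt.Println(errors.Is(wrapped, io.ErrUnexpectedEOF)) // true
}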
287  vendor/cloud.google.com/go/storage/reader.go  generated vendored
@@ -16,19 +16,15 @@ package storage
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"hash/crc32"
 	"io"
 	"io/ioutil"
 	"net/http"
-	"net/url"
-	"strconv"
 	"strings"
 	"time"
 
 	"cloud.google.com/go/internal/trace"
-	"google.golang.org/api/googleapi"
 )
 
 var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
@ -94,10 +90,6 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
|
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if o.c.tc != nil {
|
||||
return o.newRangeReaderWithGRPC(ctx, offset, length)
|
||||
}
|
||||
|
||||
if err := o.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -109,208 +101,31 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
|
|||
return nil, err
|
||||
}
|
||||
}
|
||||
u := &url.URL{
|
||||
Scheme: o.c.scheme,
|
||||
Host: o.c.readHost,
|
||||
Path: fmt.Sprintf("/%s/%s", o.bucket, o.object),
|
||||
}
|
||||
verb := "GET"
|
||||
if length == 0 {
|
||||
verb = "HEAD"
|
||||
}
|
||||
req, err := http.NewRequest(verb, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
if o.userProject != "" {
|
||||
req.Header.Set("X-Goog-User-Project", o.userProject)
|
||||
}
|
||||
if o.readCompressed {
|
||||
req.Header.Set("Accept-Encoding", "gzip")
|
||||
}
|
||||
if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
|
||||
opts := makeStorageOpts(true, o.retry, o.userProject)
|
||||
|
||||
params := &newRangeReaderParams{
|
||||
bucket: o.bucket,
|
||||
object: o.object,
|
||||
gen: o.gen,
|
||||
offset: offset,
|
||||
length: length,
|
||||
encryptionKey: o.encryptionKey,
|
||||
conds: o.conds,
|
||||
readCompressed: o.readCompressed,
|
||||
}
|
||||
|
||||
gen := o.gen
|
||||
r, err = o.c.tc.NewRangeReader(ctx, params, opts...)
|
||||
|
||||
// Define a function that initiates a Read with offset and length, assuming we
|
||||
// have already read seen bytes.
|
||||
reopen := func(seen int64) (*http.Response, error) {
|
||||
// If the context has already expired, return immediately without making a
|
||||
// call.
|
||||
if err := ctx.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
start := offset + seen
|
||||
if length < 0 && start < 0 {
|
||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d", start))
|
||||
} else if length < 0 && start > 0 {
|
||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start))
|
||||
} else if length > 0 {
|
||||
// The end character isn't affected by how many bytes we've seen.
|
||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1))
|
||||
}
|
||||
// We wait to assign conditions here because the generation number can change in between reopen() runs.
|
||||
if err := setConditionsHeaders(req.Header, o.conds); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// If an object generation is specified, include generation as query string parameters.
|
||||
if gen >= 0 {
|
||||
req.URL.RawQuery = fmt.Sprintf("generation=%d", gen)
|
||||
}
|
||||
|
||||
var res *http.Response
|
||||
err = run(ctx, func() error {
|
||||
res, err = o.c.hc.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
res.Body.Close()
|
||||
return ErrObjectNotExist
|
||||
}
|
||||
if res.StatusCode < 200 || res.StatusCode > 299 {
|
||||
body, _ := ioutil.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
return &googleapi.Error{
|
||||
Code: res.StatusCode,
|
||||
Header: res.Header,
|
||||
Body: string(body),
|
||||
}
|
||||
}
|
||||
|
||||
partialContentNotSatisfied :=
|
||||
!decompressiveTranscoding(res) &&
|
||||
start > 0 && length != 0 &&
|
||||
res.StatusCode != http.StatusPartialContent
|
||||
|
||||
if partialContentNotSatisfied {
|
||||
res.Body.Close()
|
||||
return errors.New("storage: partial request not satisfied")
|
||||
}
|
||||
|
||||
// With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves
|
||||
// back the whole file regardless of the range count passed in as per:
|
||||
// https://cloud.google.com/storage/docs/transcoding#range,
|
||||
// thus we have to manually move the body forward by seen bytes.
|
||||
if decompressiveTranscoding(res) && seen > 0 {
|
||||
_, _ = io.CopyN(ioutil.Discard, res.Body, seen)
|
||||
}
|
||||
|
||||
// If a generation hasn't been specified, and this is the first response we get, let's record the
|
||||
// generation. In future requests we'll use this generation as a precondition to avoid data races.
|
||||
if gen < 0 && res.Header.Get("X-Goog-Generation") != "" {
|
||||
gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
gen = gen64
|
||||
}
|
||||
return nil
|
||||
}, o.retry, true, setRetryHeaderHTTP(&readerRequestWrapper{req}))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
res, err := reopen(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var (
|
||||
size int64 // total size of object, even if a range was requested.
|
||||
checkCRC bool
|
||||
crc uint32
|
||||
startOffset int64 // non-zero if range request.
|
||||
)
|
||||
if res.StatusCode == http.StatusPartialContent {
|
||||
cr := strings.TrimSpace(res.Header.Get("Content-Range"))
|
||||
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
|
||||
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
|
||||
}
|
||||
// Content range is formatted <first byte>-<last byte>/<total size>. We take
|
||||
// the total size.
|
||||
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
|
||||
}
|
||||
|
||||
dashIndex := strings.Index(cr, "-")
|
||||
if dashIndex >= 0 {
|
||||
startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storage: invalid Content-Range %q: %v", cr, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
size = res.ContentLength
|
||||
// Check the CRC iff all of the following hold:
|
||||
// - We asked for content (length != 0).
|
||||
// - We got all the content (status != PartialContent).
|
||||
// - The server sent a CRC header.
|
||||
// - The Go http stack did not uncompress the file.
|
||||
// - We were not served compressed data that was uncompressed on download.
|
||||
// The problem with the last two cases is that the CRC will not match -- GCS
|
||||
// computes it on the compressed contents, but we compute it on the
|
||||
// uncompressed contents.
|
||||
if length != 0 && !res.Uncompressed && !uncompressedByServer(res) {
|
||||
crc, checkCRC = parseCRC32c(res)
|
||||
}
|
||||
}
|
||||
|
||||
remain := res.ContentLength
|
||||
body := res.Body
|
||||
if length == 0 {
|
||||
remain = 0
|
||||
body.Close()
|
||||
body = emptyBody
|
||||
}
|
||||
var metaGen int64
|
||||
if res.Header.Get("X-Goog-Metageneration") != "" {
|
||||
metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var lm time.Time
|
||||
if res.Header.Get("Last-Modified") != "" {
|
||||
lm, err = http.ParseTime(res.Header.Get("Last-Modified"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
attrs := ReaderObjectAttrs{
|
||||
Size: size,
|
||||
ContentType: res.Header.Get("Content-Type"),
|
||||
ContentEncoding: res.Header.Get("Content-Encoding"),
|
||||
CacheControl: res.Header.Get("Cache-Control"),
|
||||
LastModified: lm,
|
||||
StartOffset: startOffset,
|
||||
Generation: gen,
|
||||
Metageneration: metaGen,
|
||||
}
|
||||
return &Reader{
|
||||
Attrs: attrs,
|
||||
body: body,
|
||||
size: size,
|
||||
remain: remain,
|
||||
wantCRC: crc,
|
||||
checkCRC: checkCRC,
|
||||
reopen: reopen,
|
||||
}, nil
|
||||
return r, err
|
||||
}
|
||||
|
||||
// decompressiveTranscoding returns true if the request was served decompressed
|
||||
// and different than its original storage form. This happens when the "Content-Encoding"
|
||||
// header is "gzip".
|
||||
// See:
|
||||
// * https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip
|
||||
// * https://github.com/googleapis/google-cloud-go/issues/1800
|
||||
// - https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip
|
||||
// - https://github.com/googleapis/google-cloud-go/issues/1800
|
||||
func decompressiveTranscoding(res *http.Response) bool {
|
||||
// Decompressive Transcoding.
|
||||
return res.Header.Get("Content-Encoding") == "gzip" ||
|
||||
|
@ -375,37 +190,21 @@ var emptyBody = ioutil.NopCloser(strings.NewReader(""))
|
|||
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
|
||||
type Reader struct {
|
||||
Attrs ReaderObjectAttrs
|
||||
body io.ReadCloser
|
||||
seen, remain, size int64
|
||||
checkCRC bool // should we check the CRC?
|
||||
wantCRC uint32 // the CRC32c value the server sent in the header
|
||||
gotCRC uint32 // running crc
|
||||
reopen func(seen int64) (*http.Response, error)
|
||||
|
||||
reader io.ReadCloser
|
||||
}
|
||||
|
||||
// Close closes the Reader. It must be called when done reading.
|
||||
func (r *Reader) Close() error {
|
||||
if r.body != nil {
|
||||
return r.body.Close()
|
||||
}
|
||||
|
||||
// TODO(noahdietz): Complete integration means returning this call's return
|
||||
// value, which for gRPC will always be nil.
|
||||
if r.reader != nil {
|
||||
return r.reader.Close()
|
||||
}
|
||||
return nil
|
||||
return r.reader.Close()
|
||||
}
|
||||
|
||||
func (r *Reader) Read(p []byte) (int, error) {
|
||||
read := r.readWithRetry
|
||||
if r.reader != nil {
|
||||
read = r.reader.Read
|
||||
}
|
||||
|
||||
n, err := read(p)
|
||||
n, err := r.reader.Read(p)
|
||||
if r.remain != -1 {
|
||||
r.remain -= int64(n)
|
||||
}
|
||||
|
@ -424,56 +223,6 @@ func (r *Reader) Read(p []byte) (int, error) {
|
|||
return n, err
|
||||
}
|
||||
|
||||
// newRangeReaderWithGRPC creates a new Reader with the given range that uses
|
||||
// gRPC to read Object content.
|
||||
//
|
||||
// This is an experimental API and not intended for public use.
|
||||
func (o *ObjectHandle) newRangeReaderWithGRPC(ctx context.Context, offset, length int64) (r *Reader, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.newRangeReaderWithGRPC")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if err = o.validate(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
params := &newRangeReaderParams{
|
||||
bucket: o.bucket,
|
||||
object: o.object,
|
||||
gen: o.gen,
|
||||
offset: offset,
|
||||
length: length,
|
||||
encryptionKey: o.encryptionKey,
|
||||
conds: o.conds,
|
||||
}
|
||||
|
||||
r, err = o.c.tc.NewRangeReader(ctx, params)
|
||||
|
||||
return r, err
|
||||
}
|
||||
|
||||
func (r *Reader) readWithRetry(p []byte) (int, error) {
|
||||
n := 0
|
||||
for len(p[n:]) > 0 {
|
||||
m, err := r.body.Read(p[n:])
|
||||
n += m
|
||||
r.seen += int64(m)
|
||||
if err == nil || err == io.EOF {
|
||||
return n, err
|
||||
}
|
||||
// Read failed (likely due to connection issues), but we will try to reopen
|
||||
// the pipe and continue. Send a ranged read request that takes into account
|
||||
// the number of bytes we've already seen.
|
||||
res, err := r.reopen(r.seen)
|
||||
if err != nil {
|
||||
// reopen already retries
|
||||
return n, err
|
||||
}
|
||||
r.body.Close()
|
||||
r.body = res.Body
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Size returns the size of the object in bytes.
|
||||
// The returned value is always the same and is not affected by
|
||||
// calls to Read or Close.
|
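Aside (not part of the vendored diff): NewRangeReader now builds newRangeReaderParams and routes through the transport-agnostic client (o.c.tc) for both HTTP and gRPC, but the public Reader API is unchanged. A minimal sketch of a ranged read; bucket and object names are placeholders.

package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Read bytes [0, 1024) of the object; a length of -1 would read to the end.
	rc, err := client.Bucket("my-bucket").Object("my-object").NewRangeReader(ctx, 0, 1024)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	data, err := io.ReadAll(rc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes, object size %d\n", len(data), rc.Attrs.Size)
}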
11  vendor/cloud.google.com/go/storage/release-please-config.json  generated vendored
@@ -1,11 +0,0 @@
-{
-    "release-type": "go-yoshi",
-    "separate-pull-requests": true,
-    "include-component-in-tag": true,
-    "tag-separator": "/",
-    "packages": {
-        "storage": {
-            "component": "storage"
-        }
-    }
-}
384  vendor/cloud.google.com/go/storage/storage.go  generated vendored
@@ -33,6 +33,7 @@ import (
 	"reflect"
 	"regexp"
+	"sort"
 	"strconv"
 	"strings"
 	"time"
 	"unicode/utf8"
@@ -40,6 +41,7 @@ import (
 	"cloud.google.com/go/internal/optional"
 	"cloud.google.com/go/internal/trace"
 	"cloud.google.com/go/storage/internal"
+	storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
 	"github.com/googleapis/gax-go/v2"
 	"golang.org/x/oauth2/google"
 	"google.golang.org/api/googleapi"
@@ -48,9 +50,9 @@ import (
 	raw "google.golang.org/api/storage/v1"
 	"google.golang.org/api/transport"
 	htransport "google.golang.org/api/transport/http"
-	storagepb "google.golang.org/genproto/googleapis/storage/v2"
 	"google.golang.org/protobuf/proto"
 	"google.golang.org/protobuf/reflect/protoreflect"
 	"google.golang.org/protobuf/types/known/fieldmaskpb"
 	"google.golang.org/protobuf/types/known/timestamppb"
 )
 
@@ -115,6 +117,10 @@ type Client struct {
 
 	// tc is the transport-agnostic client implemented with either gRPC or HTTP.
 	tc storageClient
+	// useGRPC flags whether the client uses gRPC. This is needed while the
+	// integration piece is only partially complete.
+	// TODO: remove before merging to main.
+	useGRPC bool
 }
 
 // NewClient creates a new Google Cloud Storage client.
@@ -182,17 +188,22 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
 	// htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint.
 	hc, ep, err := htransport.NewClient(ctx, opts...)
 	if err != nil {
-		return nil, fmt.Errorf("dialing: %v", err)
+		return nil, fmt.Errorf("dialing: %w", err)
 	}
 	// RawService should be created with the chosen endpoint to take account of user override.
 	rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc))
 	if err != nil {
-		return nil, fmt.Errorf("storage client: %v", err)
+		return nil, fmt.Errorf("storage client: %w", err)
 	}
 	// Update readHost and scheme with the chosen endpoint.
 	u, err := url.Parse(ep)
 	if err != nil {
-		return nil, fmt.Errorf("supplied endpoint %q is not valid: %v", ep, err)
+		return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err)
 	}
 
+	tc, err := newHTTPStorageClient(ctx, withClientOptions(opts...))
+	if err != nil {
+		return nil, fmt.Errorf("storage: %w", err)
+	}
+
 	return &Client{
@@ -201,6 +212,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
 		scheme:   u.Scheme,
 		readHost: u.Host,
 		creds:    creds,
+		tc:       tc,
 	}, nil
 }
 
@@ -215,7 +227,7 @@ func newGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, e
 		return nil, err
 	}
 
-	return &Client{tc: tc}, nil
+	return &Client{tc: tc, useGRPC: true}, nil
 }
 
 // Close closes the Client.
@ -509,13 +521,13 @@ func v2SanitizeHeaders(hdrs []string) []string {
|
|||
// at https://cloud.google.com/storage/docs/authentication/canonical-requests#about-headers.
|
||||
//
|
||||
// V4 does a couple things differently from V2:
|
||||
// - Headers get sorted by key, instead of by key:value. We do this in
|
||||
// signedURLV4.
|
||||
// - There's no canonical regexp: we simply split headers on :.
|
||||
// - We don't exclude canonical headers.
|
||||
// - We replace leading and trailing spaces in header values, like v2, but also
|
||||
// all intermediate space duplicates get stripped. That is, there's only ever
|
||||
// a single consecutive space.
|
||||
// - Headers get sorted by key, instead of by key:value. We do this in
|
||||
// signedURLV4.
|
||||
// - There's no canonical regexp: we simply split headers on :.
|
||||
// - We don't exclude canonical headers.
|
||||
// - We replace leading and trailing spaces in header values, like v2, but also
|
||||
// all intermediate space duplicates get stripped. That is, there's only ever
|
||||
// a single consecutive space.
|
||||
func v4SanitizeHeaders(hdrs []string) []string {
|
||||
headerMap := map[string][]string{}
|
||||
for _, hdr := range hdrs {
|
||||
|
@ -907,27 +919,8 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error
|
|||
if err := o.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx)
|
||||
if err := applyConds("Attrs", o.gen, o.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if o.userProject != "" {
|
||||
call.UserProject(o.userProject)
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var obj *raw.Object
|
||||
setClientHeader(call.Header())
|
||||
err = run(ctx, func() error { obj, err = call.Do(); return err }, o.retry, true, setRetryHeaderHTTP(call))
|
||||
var e *googleapi.Error
|
||||
if errors.As(err, &e) && e.Code == http.StatusNotFound {
|
||||
return nil, ErrObjectNotExist
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newObject(obj), nil
|
||||
opts := makeStorageOpts(true, o.retry, o.userProject)
|
||||
return o.c.tc.GetObject(ctx, o.bucket, o.object, o.gen, o.encryptionKey, o.conds, opts...)
|
||||
}
|
||||
|
||||
// Update updates an object with the provided attributes. See
|
||||
|
@ -940,99 +933,9 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
|
|||
if err := o.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var attrs ObjectAttrs
|
||||
// Lists of fields to send, and set to null, in the JSON.
|
||||
var forceSendFields, nullFields []string
|
||||
if uattrs.ContentType != nil {
|
||||
attrs.ContentType = optional.ToString(uattrs.ContentType)
|
||||
// For ContentType, sending the empty string is a no-op.
|
||||
// Instead we send a null.
|
||||
if attrs.ContentType == "" {
|
||||
nullFields = append(nullFields, "ContentType")
|
||||
} else {
|
||||
forceSendFields = append(forceSendFields, "ContentType")
|
||||
}
|
||||
}
|
||||
if uattrs.ContentLanguage != nil {
|
||||
attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage)
|
||||
// For ContentLanguage it's an error to send the empty string.
|
||||
// Instead we send a null.
|
||||
if attrs.ContentLanguage == "" {
|
||||
nullFields = append(nullFields, "ContentLanguage")
|
||||
} else {
|
||||
forceSendFields = append(forceSendFields, "ContentLanguage")
|
||||
}
|
||||
}
|
||||
if uattrs.ContentEncoding != nil {
|
||||
attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding)
|
||||
forceSendFields = append(forceSendFields, "ContentEncoding")
|
||||
}
|
||||
if uattrs.ContentDisposition != nil {
|
||||
attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition)
|
||||
forceSendFields = append(forceSendFields, "ContentDisposition")
|
||||
}
|
||||
if uattrs.CacheControl != nil {
|
||||
attrs.CacheControl = optional.ToString(uattrs.CacheControl)
|
||||
forceSendFields = append(forceSendFields, "CacheControl")
|
||||
}
|
||||
if uattrs.EventBasedHold != nil {
|
||||
attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold)
|
||||
forceSendFields = append(forceSendFields, "EventBasedHold")
|
||||
}
|
||||
if uattrs.TemporaryHold != nil {
|
||||
attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold)
|
||||
forceSendFields = append(forceSendFields, "TemporaryHold")
|
||||
}
|
||||
if !uattrs.CustomTime.IsZero() {
|
||||
attrs.CustomTime = uattrs.CustomTime
|
||||
forceSendFields = append(forceSendFields, "CustomTime")
|
||||
}
|
||||
if uattrs.Metadata != nil {
|
||||
attrs.Metadata = uattrs.Metadata
|
||||
if len(attrs.Metadata) == 0 {
|
||||
// Sending the empty map is a no-op. We send null instead.
|
||||
nullFields = append(nullFields, "Metadata")
|
||||
} else {
|
||||
forceSendFields = append(forceSendFields, "Metadata")
|
||||
}
|
||||
}
|
||||
if uattrs.ACL != nil {
|
||||
attrs.ACL = uattrs.ACL
|
||||
// It's an error to attempt to delete the ACL, so
|
||||
// we don't append to nullFields here.
|
||||
forceSendFields = append(forceSendFields, "Acl")
|
||||
}
|
||||
rawObj := attrs.toRawObject(o.bucket)
|
||||
rawObj.ForceSendFields = forceSendFields
|
||||
rawObj.NullFields = nullFields
|
||||
call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx)
|
||||
if err := applyConds("Update", o.gen, o.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if o.userProject != "" {
|
||||
call.UserProject(o.userProject)
|
||||
}
|
||||
if uattrs.PredefinedACL != "" {
|
||||
call.PredefinedAcl(uattrs.PredefinedACL)
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var obj *raw.Object
|
||||
setClientHeader(call.Header())
|
||||
var isIdempotent bool
|
||||
if o.conds != nil && o.conds.MetagenerationMatch != 0 {
|
||||
isIdempotent = true
|
||||
}
|
||||
err = run(ctx, func() error { obj, err = call.Do(); return err }, o.retry, isIdempotent, setRetryHeaderHTTP(call))
|
||||
var e *googleapi.Error
|
||||
if errors.As(err, &e) && e.Code == http.StatusNotFound {
|
||||
return nil, ErrObjectNotExist
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newObject(obj), nil
|
||||
isIdempotent := o.conds != nil && o.conds.MetagenerationMatch != 0
|
||||
opts := makeStorageOpts(isIdempotent, o.retry, o.userProject)
|
||||
return o.c.tc.UpdateObject(ctx, o.bucket, o.object, &uattrs, o.gen, o.encryptionKey, o.conds, opts...)
|
||||
}
|
||||
|
||||
// BucketName returns the name of the bucket.
|
||||
|
@ -1052,11 +955,12 @@ func (o *ObjectHandle) ObjectName() string {
|
|||
//
|
||||
// For example, to change ContentType and delete ContentEncoding and
|
||||
// Metadata, use
|
||||
// ObjectAttrsToUpdate{
|
||||
// ContentType: "text/html",
|
||||
// ContentEncoding: "",
|
||||
// Metadata: map[string]string{},
|
||||
// }
|
||||
//
|
||||
// ObjectAttrsToUpdate{
|
||||
// ContentType: "text/html",
|
||||
// ContentEncoding: "",
|
||||
// Metadata: map[string]string{},
|
||||
// }
|
||||
type ObjectAttrsToUpdate struct {
|
||||
EventBasedHold optional.Bool
|
||||
TemporaryHold optional.Bool
|
||||
|
@ -1079,27 +983,11 @@ func (o *ObjectHandle) Delete(ctx context.Context) error {
|
|||
if err := o.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx)
|
||||
if err := applyConds("Delete", o.gen, o.conds, call); err != nil {
|
||||
return err
|
||||
}
|
||||
if o.userProject != "" {
|
||||
call.UserProject(o.userProject)
|
||||
}
|
||||
// Encryption doesn't apply to Delete.
|
||||
setClientHeader(call.Header())
|
||||
var isIdempotent bool
|
||||
// Delete is idempotent if GenerationMatch or Generation have been passed in.
|
||||
// The default generation is negative to get the latest version of the object.
|
||||
if (o.conds != nil && o.conds.GenerationMatch != 0) || o.gen >= 0 {
|
||||
isIdempotent = true
|
||||
}
|
||||
err := run(ctx, func() error { return call.Do() }, o.retry, isIdempotent, setRetryHeaderHTTP(call))
|
||||
var e *googleapi.Error
|
||||
if errors.As(err, &e) && e.Code == http.StatusNotFound {
|
||||
return ErrObjectNotExist
|
||||
}
|
||||
return err
|
||||
isIdempotent := (o.conds != nil && o.conds.GenerationMatch != 0) || o.gen >= 0
|
||||
opts := makeStorageOpts(isIdempotent, o.retry, o.userProject)
|
||||
return o.c.tc.DeleteObject(ctx, o.bucket, o.object, o.gen, o.conds, opts...)
|
||||
}
|
||||
|
||||
// ReadCompressed when true causes the read to happen without decompressing.
|
||||
|
@ -1202,14 +1090,12 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
|
|||
|
||||
// toProtoObject copies the editable attributes from o to the proto library's Object type.
|
||||
func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object {
|
||||
checksums := &storagepb.ObjectChecksums{Md5Hash: o.MD5}
|
||||
if o.CRC32C > 0 {
|
||||
checksums.Crc32C = proto.Uint32(o.CRC32C)
|
||||
}
|
||||
|
||||
// For now, there are only globally unique buckets, and "_" is the alias
|
||||
// project ID for such buckets.
|
||||
b = bucketResourceName("_", b)
|
||||
// project ID for such buckets. If the bucket is not provided, like in the
|
||||
// destination ObjectAttrs of a Copy, do not attempt to format it.
|
||||
if b != "" {
|
||||
b = bucketResourceName(globalProjectAlias, b)
|
||||
}
|
||||
|
||||
return &storagepb.Object{
|
||||
Bucket: b,
|
||||
|
@ -1232,7 +1118,6 @@ func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object {
|
|||
KmsKey: o.KMSKeyName,
|
||||
Generation: o.Generation,
|
||||
Size: o.Size,
|
||||
Checksums: checksums,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1599,10 +1484,11 @@ type Query struct {
|
|||
// object will be included in the results.
|
||||
Versions bool
|
||||
|
||||
// fieldSelection is used to select only specific fields to be returned by
|
||||
// the query. It's used internally and is populated for the user by
|
||||
// calling Query.SetAttrSelection
|
||||
fieldSelection string
|
||||
// attrSelection is used to select only specific fields to be returned by
|
||||
// the query. It is set by the user calling SetAttrSelection. These
|
||||
// are used by toFieldMask and toFieldSelection for gRPC and HTTP/JSON
|
||||
// clients respectively.
|
||||
attrSelection []string
|
||||
|
||||
// StartOffset is used to filter results to objects whose names are
|
||||
// lexicographically equal to or after startOffset. If endOffset is also set,
|
||||
|
@ -1662,6 +1548,39 @@ var attrToFieldMap = map[string]string{
|
|||
"CustomTime": "customTime",
|
||||
}
|
||||
|
||||
// attrToProtoFieldMap maps the field names of ObjectAttrs to the underlying field
|
||||
// names in the protobuf Object message.
|
||||
var attrToProtoFieldMap = map[string]string{
|
||||
"Name": "name",
|
||||
"Bucket": "bucket",
|
||||
"Etag": "etag",
|
||||
"Generation": "generation",
|
||||
"Metageneration": "metageneration",
|
||||
"StorageClass": "storage_class",
|
||||
"Size": "size",
|
||||
"ContentEncoding": "content_encoding",
|
||||
"ContentDisposition": "content_disposition",
|
||||
"CacheControl": "cache_control",
|
||||
"ACL": "acl",
|
||||
"ContentLanguage": "content_language",
|
||||
"Deleted": "delete_time",
|
||||
"ContentType": "content_type",
|
||||
"Created": "create_time",
|
||||
"CRC32C": "checksums.crc32c",
|
||||
"MD5": "checksums.md5_hash",
|
||||
"Updated": "update_time",
|
||||
"KMSKeyName": "kms_key",
|
||||
"TemporaryHold": "temporary_hold",
|
||||
"RetentionExpirationTime": "retention_expire_time",
|
||||
"Metadata": "metadata",
|
||||
"EventBasedHold": "event_based_hold",
|
||||
"Owner": "owner",
|
||||
"CustomerKeySHA256": "customer_encryption",
|
||||
"CustomTime": "custom_time",
|
||||
// MediaLink was explicitly excluded from the proto as it is an HTTP-ism.
|
||||
// "MediaLink": "mediaLink",
|
||||
}
|
||||
|
||||
// SetAttrSelection makes the query populate only specific attributes of
|
||||
// objects. When iterating over objects, if you only need each object's name
|
||||
// and size, pass []string{"Name", "Size"} to this method. Only these fields
|
||||
|
@ -1670,16 +1589,42 @@ var attrToFieldMap = map[string]string{
|
|||
// optimization; for more information, see
|
||||
// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance
|
||||
func (q *Query) SetAttrSelection(attrs []string) error {
|
||||
// Validate selections.
|
||||
for _, attr := range attrs {
|
||||
// If the attr is acceptable for one of the two sets, then it is OK.
|
||||
// If it is not acceptable for either, then return an error.
|
||||
// The respective masking implementations ignore unknown attrs which
|
||||
// makes switching between transports a little easier.
|
||||
_, okJSON := attrToFieldMap[attr]
|
||||
_, okGRPC := attrToProtoFieldMap[attr]
|
||||
|
||||
if !okJSON && !okGRPC {
|
||||
return fmt.Errorf("storage: attr %v is not valid", attr)
|
||||
}
|
||||
}
|
||||
|
||||
q.attrSelection = attrs
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *Query) toFieldSelection() string {
|
||||
if q == nil || len(q.attrSelection) == 0 {
|
||||
return ""
|
||||
}
|
||||
fieldSet := make(map[string]bool)
|
||||
|
||||
for _, attr := range attrs {
|
||||
for _, attr := range q.attrSelection {
|
||||
field, ok := attrToFieldMap[attr]
|
||||
if !ok {
|
||||
return fmt.Errorf("storage: attr %v is not valid", attr)
|
||||
// Future proofing, skip unknown fields, let SetAttrSelection handle
|
||||
// error modes.
|
||||
continue
|
||||
}
|
||||
fieldSet[field] = true
|
||||
}
|
||||
|
||||
var s string
|
||||
if len(fieldSet) > 0 {
|
||||
var b bytes.Buffer
|
||||
b.WriteString("prefixes,items(")
|
||||
|
@ -1692,9 +1637,50 @@ func (q *Query) SetAttrSelection(attrs []string) error {
|
|||
b.WriteString(field)
|
||||
}
|
||||
b.WriteString(")")
|
||||
q.fieldSelection = b.String()
|
||||
s = b.String()
|
||||
}
|
||||
return nil
|
||||
return s
|
||||
}
|
||||
|
||||
func (q *Query) toFieldMask() *fieldmaskpb.FieldMask {
|
||||
// The default behavior with no Query is ProjectionDefault (i.e. ProjectionFull).
|
||||
if q == nil {
|
||||
return &fieldmaskpb.FieldMask{Paths: []string{"*"}}
|
||||
}
|
||||
|
||||
// User selected attributes via q.SetAttrSelection. This takes precedence
|
||||
// over the Projection.
|
||||
if numSelected := len(q.attrSelection); numSelected > 0 {
|
||||
protoFieldPaths := make([]string, 0, numSelected)
|
||||
|
||||
for _, attr := range q.attrSelection {
|
||||
pf, ok := attrToProtoFieldMap[attr]
|
||||
if !ok {
|
||||
// Future proofing, skip unknown fields, let SetAttrSelection
|
||||
// handle error modes.
|
||||
continue
|
||||
}
|
||||
protoFieldPaths = append(protoFieldPaths, pf)
|
||||
}
|
||||
|
||||
return &fieldmaskpb.FieldMask{Paths: protoFieldPaths}
|
||||
}
|
||||
|
||||
// ProjectDefault == ProjectionFull which means all fields.
|
||||
fm := &fieldmaskpb.FieldMask{Paths: []string{"*"}}
|
||||
if q.Projection == ProjectionNoACL {
|
||||
paths := make([]string, 0, len(attrToProtoFieldMap)-2) // omitting two fields
|
||||
for _, f := range attrToProtoFieldMap {
|
||||
// Skip the acl and owner fields for "NoACL".
|
||||
if f == "acl" || f == "owner" {
|
||||
continue
|
||||
}
|
||||
paths = append(paths, f)
|
||||
}
|
||||
fm.Paths = paths
|
||||
}
|
||||
|
||||
return fm
|
||||
}
|
||||
|
||||
// Conditions constrain methods to act on specific generations of
|
||||
|
@ -1838,6 +1824,33 @@ func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall
|
|||
return nil
|
||||
}
|
||||
|
||||
func applySourceCondsProto(gen int64, conds *Conditions, call *storagepb.RewriteObjectRequest) error {
|
||||
if gen >= 0 {
|
||||
call.SourceGeneration = gen
|
||||
}
|
||||
if conds == nil {
|
||||
return nil
|
||||
}
|
||||
if err := conds.validate("CopyTo source"); err != nil {
|
||||
return err
|
||||
}
|
||||
switch {
|
||||
case conds.GenerationMatch != 0:
|
||||
call.IfSourceGenerationMatch = proto.Int64(conds.GenerationMatch)
|
||||
case conds.GenerationNotMatch != 0:
|
||||
call.IfSourceGenerationNotMatch = proto.Int64(conds.GenerationNotMatch)
|
||||
case conds.DoesNotExist:
|
||||
call.IfSourceGenerationMatch = proto.Int64(0)
|
||||
}
|
||||
switch {
|
||||
case conds.MetagenerationMatch != 0:
|
||||
call.IfSourceMetagenerationMatch = proto.Int64(conds.MetagenerationMatch)
|
||||
case conds.MetagenerationNotMatch != 0:
|
||||
call.IfSourceMetagenerationNotMatch = proto.Int64(conds.MetagenerationNotMatch)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setConditionField sets a field on a *raw.WhateverCall.
|
||||
// We can't use anonymous interfaces because the return type is
|
||||
// different, since the field setters are builders.
|
||||
|
@ -1959,8 +1972,8 @@ func (ws *withPolicy) apply(config *retryConfig) {
|
|||
|
||||
// WithErrorFunc allows users to pass a custom function to the retryer. Errors
|
||||
// will be retried if and only if `shouldRetry(err)` returns true.
|
||||
// By default, the following errors are retried (see invoke.go for the default
|
||||
// shouldRetry function):
|
||||
// By default, the following errors are retried (see ShouldRetry for the default
|
||||
// function):
|
||||
//
|
||||
// - HTTP responses with codes 408, 429, 502, 503, and 504.
|
||||
//
|
||||
|
@ -1971,7 +1984,8 @@ func (ws *withPolicy) apply(config *retryConfig) {
|
|||
// - Wrapped versions of these errors.
|
||||
//
|
||||
// This option can be used to retry on a different set of errors than the
|
||||
// default.
|
||||
// default. Users can use the default ShouldRetry function inside their custom
|
||||
// function if they only want to make minor modifications to default behavior.
|
||||
func WithErrorFunc(shouldRetry func(err error) bool) RetryOption {
|
||||
return &withErrorFunc{
|
||||
shouldRetry: shouldRetry,
|
||||
|
@ -2066,17 +2080,9 @@ func toProtoCommonObjectRequestParams(key []byte) *storagepb.CommonObjectRequest
|
|||
|
||||
// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account.
|
||||
func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) {
|
||||
r := c.raw.Projects.ServiceAccount.Get(projectID)
|
||||
var res *raw.ServiceAccount
|
||||
var err error
|
||||
err = run(ctx, func() error {
|
||||
res, err = r.Context(ctx).Do()
|
||||
return err
|
||||
}, c.retry, true, setRetryHeaderHTTP(r))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return res.EmailAddress, nil
|
||||
o := makeStorageOpts(true, c.retry, "")
|
||||
return c.tc.GetServiceAccount(ctx, projectID, o...)
|
||||
|
||||
}
|
||||
|
||||
// bucketResourceName formats the given project ID and bucketResourceName ID
|
||||
|
@ -2093,6 +2099,24 @@ func parseBucketName(b string) string {
|
|||
return b[sep+1:]
|
||||
}
|
||||
|
||||
// parseProjectNumber consumes the given resource name and parses out the project
|
||||
// number if one is present, i.e. it is not a project ID.
|
||||
func parseProjectNumber(r string) uint64 {
|
||||
projectID := regexp.MustCompile(`projects\/([0-9]+)\/?`)
|
||||
if matches := projectID.FindStringSubmatch(r); len(matches) > 0 {
|
||||
// Capture group follows the matched segment. For example:
|
||||
// input: projects/123/bars/456
|
||||
// output: [projects/123/, 123]
|
||||
number, err := strconv.ParseUint(matches[1], 10, 64)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return number
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// toProjectResource accepts a project ID and formats it as a Project resource
|
||||
// name.
|
||||
func toProjectResource(project string) string {
|
||||
|
|
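The WithErrorFunc doc comment updated in this hunk now points callers at the exported ShouldRetry predicate. As a rough illustration of how the two fit together (a minimal sketch, not vendored code; the bucket and object names and errTransient are made up for the example):

```go
package main

import (
	"context"
	"errors"
	"log"

	"cloud.google.com/go/storage"
)

// errTransient is a hypothetical application-specific error used only to
// illustrate extending the default retry predicate.
var errTransient = errors.New("transient app error")

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Retry whatever the default ShouldRetry predicate retries, plus errTransient.
	obj := client.Bucket("my-bucket").Object("my-object").Retryer(
		storage.WithErrorFunc(func(err error) bool {
			return storage.ShouldRetry(err) || errors.Is(err, errTransient)
		}),
	)

	attrs, err := obj.Attrs(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("size: %d", attrs.Size)
}
```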
134 vendor/cloud.google.com/go/storage/writer.go generated vendored
|
@ -16,25 +16,12 @@ package storage
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
storagepb "google.golang.org/genproto/googleapis/storage/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
// Maximum amount of content that can be sent per WriteObjectRequest message.
|
||||
// A buffer reaching this amount will precipitate a flush of the buffer.
|
||||
//
|
||||
// This is only used for the gRPC-based Writer.
|
||||
maxPerMessageWriteSize int = int(storagepb.ServiceConstants_MAX_WRITE_CHUNK_BYTES)
|
||||
)
|
||||
|
||||
// A Writer writes a Cloud Storage object.
|
||||
|
@ -122,102 +109,6 @@ type Writer struct {
|
|||
err error
|
||||
}
|
||||
|
||||
func (w *Writer) open() error {
|
||||
if err := w.validateWriteAttrs(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
w.pw = pw
|
||||
w.opened = true
|
||||
|
||||
go w.monitorCancel()
|
||||
|
||||
attrs := w.ObjectAttrs
|
||||
mediaOpts := []googleapi.MediaOption{
|
||||
googleapi.ChunkSize(w.ChunkSize),
|
||||
}
|
||||
if c := attrs.ContentType; c != "" {
|
||||
mediaOpts = append(mediaOpts, googleapi.ContentType(c))
|
||||
}
|
||||
if w.ChunkRetryDeadline != 0 {
|
||||
mediaOpts = append(mediaOpts, googleapi.ChunkRetryDeadline(w.ChunkRetryDeadline))
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer close(w.donec)
|
||||
|
||||
rawObj := attrs.toRawObject(w.o.bucket)
|
||||
if w.SendCRC32C {
|
||||
rawObj.Crc32c = encodeUint32(attrs.CRC32C)
|
||||
}
|
||||
if w.MD5 != nil {
|
||||
rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
|
||||
}
|
||||
call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
|
||||
Media(pr, mediaOpts...).
|
||||
Projection("full").
|
||||
Context(w.ctx).
|
||||
Name(w.o.object)
|
||||
|
||||
if w.ProgressFunc != nil {
|
||||
call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
|
||||
}
|
||||
if attrs.KMSKeyName != "" {
|
||||
call.KmsKeyName(attrs.KMSKeyName)
|
||||
}
|
||||
if attrs.PredefinedACL != "" {
|
||||
call.PredefinedAcl(attrs.PredefinedACL)
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
|
||||
w.mu.Lock()
|
||||
w.err = err
|
||||
w.mu.Unlock()
|
||||
pr.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
var resp *raw.Object
|
||||
err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
|
||||
if err == nil {
|
||||
if w.o.userProject != "" {
|
||||
call.UserProject(w.o.userProject)
|
||||
}
|
||||
setClientHeader(call.Header())
|
||||
|
||||
// The internals that perform call.Do automatically retry both the initial
|
||||
// call to set up the upload as well as calls to upload individual chunks
|
||||
// for a resumable upload (as long as the chunk size is non-zero). Hence
|
||||
// there is no need to add retries here.
|
||||
|
||||
// Retry only when the operation is idempotent or the retry policy is RetryAlways.
|
||||
isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true)
|
||||
var useRetry bool
|
||||
if (w.o.retry == nil || w.o.retry.policy == RetryIdempotent) && isIdempotent {
|
||||
useRetry = true
|
||||
} else if w.o.retry != nil && w.o.retry.policy == RetryAlways {
|
||||
useRetry = true
|
||||
}
|
||||
if useRetry {
|
||||
if w.o.retry != nil {
|
||||
call.WithRetry(w.o.retry.backoff, w.o.retry.shouldRetry)
|
||||
} else {
|
||||
call.WithRetry(nil, nil)
|
||||
}
|
||||
}
|
||||
resp, err = call.Do()
|
||||
}
|
||||
if err != nil {
|
||||
w.mu.Lock()
|
||||
w.err = err
|
||||
w.mu.Unlock()
|
||||
pr.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
w.obj = newObject(resp)
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write appends to w. It implements the io.Writer interface.
|
||||
//
|
||||
// Since writes happen asynchronously, Write may return a nil
|
||||
|
@ -235,12 +126,7 @@ func (w *Writer) Write(p []byte) (n int, err error) {
|
|||
return 0, werr
|
||||
}
|
||||
if !w.opened {
|
||||
// gRPC client has been initialized - use gRPC to upload.
|
||||
if w.o.c.tc != nil {
|
||||
if err := w.openWriter(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
} else if err := w.open(); err != nil {
|
||||
if err := w.openWriter(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
@ -264,11 +150,7 @@ func (w *Writer) Write(p []byte) (n int, err error) {
|
|||
// can be retrieved by calling Attrs.
|
||||
func (w *Writer) Close() error {
|
||||
if !w.opened {
|
||||
if w.o.c.tc != nil {
|
||||
if err := w.openWriter(); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if err := w.open(); err != nil {
|
||||
if err := w.openWriter(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -288,8 +170,12 @@ func (w *Writer) openWriter() (err error) {
|
|||
if err := w.validateWriteAttrs(); err != nil {
|
||||
return err
|
||||
}
|
||||
if w.o.gen != defaultGen {
|
||||
return fmt.Errorf("storage: generation not supported on Writer, got %v", w.o.gen)
|
||||
}
|
||||
|
||||
go w.monitorCancel()
|
||||
isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true)
|
||||
opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject)
|
||||
params := &openWriterParams{
|
||||
ctx: w.ctx,
|
||||
chunkSize: w.ChunkSize,
|
||||
|
@ -304,11 +190,15 @@ func (w *Writer) openWriter() (err error) {
|
|||
progress: w.progress,
|
||||
setObj: func(o *ObjectAttrs) { w.obj = o },
|
||||
}
|
||||
w.pw, err = w.o.c.tc.OpenWriter(params)
|
||||
if err := w.ctx.Err(); err != nil {
|
||||
return err // short-circuit
|
||||
}
|
||||
w.pw, err = w.o.c.tc.OpenWriter(params, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.opened = true
|
||||
go w.monitorCancel()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
236 vendor/cloud.google.com/go/testing.md generated vendored
|
@ -1,236 +0,0 @@
|
|||
# Testing Code that depends on Go Client Libraries
|
||||
|
||||
The Go client libraries generated as a part of `cloud.google.com/go` all take
|
||||
the approach of returning concrete types instead of interfaces. That way, new
|
||||
fields and methods can be added to the libraries without breaking users. This
|
||||
document will go over some patterns that can be used to test code that depends
|
||||
on the Go client libraries.
|
||||
|
||||
## Testing gRPC services using fakes
|
||||
|
||||
*Note*: You can see the full
|
||||
[example code using a fake here](https://github.com/googleapis/google-cloud-go/tree/main/internal/examples/fake).
|
||||
|
||||
The clients found in `cloud.google.com/go` are gRPC based, with a couple of
|
||||
notable exceptions being the [`storage`](https://pkg.go.dev/cloud.google.com/go/storage)
|
||||
and [`bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery) clients.
|
||||
Interactions with gRPC services can be faked by serving up your own in-memory
|
||||
server within your test. One benefit of using this approach is that you don’t
|
||||
need to define an interface in your runtime code; you can keep using
|
||||
concrete struct types. You instead define a fake server in your test code. For
|
||||
example, take a look at the following function:
|
||||
|
||||
```go
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
translate "cloud.google.com/go/translate/apiv3"
|
||||
"github.com/googleapis/gax-go/v2"
|
||||
translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
|
||||
)
|
||||
|
||||
func TranslateTextWithConcreteClient(client *translate.TranslationClient, text string, targetLang string) (string, error) {
|
||||
ctx := context.Background()
|
||||
log.Printf("Translating %q to %q", text, targetLang)
|
||||
req := &translatepb.TranslateTextRequest{
|
||||
Parent: fmt.Sprintf("projects/%s/locations/global", os.Getenv("GOOGLE_CLOUD_PROJECT")),
|
||||
TargetLanguageCode: "en-US",
|
||||
Contents: []string{text},
|
||||
}
|
||||
resp, err := client.TranslateText(ctx, req)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to translate text: %v", err)
|
||||
}
|
||||
translations := resp.GetTranslations()
|
||||
if len(translations) != 1 {
|
||||
return "", fmt.Errorf("expected only one result, got %d", len(translations))
|
||||
}
|
||||
return translations[0].TranslatedText, nil
|
||||
}
|
||||
```
|
||||
|
||||
Here is an example of what a fake server implementation would look like for
|
||||
faking the interactions above:
|
||||
|
||||
```go
|
||||
import (
|
||||
"context"
|
||||
|
||||
translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
|
||||
)
|
||||
|
||||
type fakeTranslationServer struct {
|
||||
translatepb.UnimplementedTranslationServiceServer
|
||||
}
|
||||
|
||||
func (f *fakeTranslationServer) TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest) (*translatepb.TranslateTextResponse, error) {
|
||||
resp := &translatepb.TranslateTextResponse{
|
||||
Translations: []*translatepb.Translation{
|
||||
&translatepb.Translation{
|
||||
TranslatedText: "Hello World",
|
||||
},
|
||||
},
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
```
|
||||
|
||||
All of the generated protobuf code found in [google.golang.org/genproto](https://pkg.go.dev/google.golang.org/genproto)
|
||||
contains a similar `package.UnimplementedFooServer` type that is useful for
|
||||
creating fakes. By embedding the unimplemented server in the
|
||||
`fakeTranslationServer`, the fake will “inherit” all of the RPCs the server
|
||||
exposes. Then, by providing our own `fakeTranslationServer.TranslateText`
|
||||
method you can “override” the default unimplemented behavior of the one RPC that
|
||||
you would like to be faked.
|
||||
|
||||
The test itself does require a little bit of setup: start up a `net.Listener`,
|
||||
register the server, and tell the client library to call the server:
|
||||
|
||||
```go
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
translate "cloud.google.com/go/translate/apiv3"
|
||||
"google.golang.org/api/option"
|
||||
translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
func TestTranslateTextWithConcreteClient(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// Setup the fake server.
|
||||
fakeTranslationServer := &fakeTranslationServer{}
|
||||
l, err := net.Listen("tcp", "localhost:0")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
gsrv := grpc.NewServer()
|
||||
translatepb.RegisterTranslationServiceServer(gsrv, fakeTranslationServer)
|
||||
fakeServerAddr := l.Addr().String()
|
||||
go func() {
|
||||
if err := gsrv.Serve(l); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Create a client.
|
||||
client, err := translate.NewTranslationClient(ctx,
|
||||
option.WithEndpoint(fakeServerAddr),
|
||||
option.WithoutAuthentication(),
|
||||
option.WithGRPCDialOption(grpc.WithInsecure()),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Run the test.
|
||||
text, err := TranslateTextWithConcreteClient(client, "Hola Mundo", "en-US")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if text != "Hello World" {
|
||||
t.Fatalf("got %q, want Hello World", text)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Testing using mocks
|
||||
|
||||
*Note*: You can see the full
|
||||
[example code using a mock here](https://github.com/googleapis/google-cloud-go/tree/main/internal/examples/mock).
|
||||
|
||||
When mocking code you need to work with interfaces. Let’s create an interface
|
||||
for the `cloud.google.com/go/translate/apiv3` client used in the
|
||||
`TranslateTextWithConcreteClient` function mentioned in the previous section.
|
||||
The `translate.Client` has over a dozen methods but this code only uses one of
|
||||
them. Here is an interface that satisfies the interactions of the
|
||||
`translate.Client` in this function.
|
||||
|
||||
```go
|
||||
type TranslationClient interface {
|
||||
TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error)
|
||||
}
|
||||
```
|
||||
|
||||
Now that we have an interface that satisfies the method being used we can
|
||||
rewrite the function signature to take the interface instead of the concrete
|
||||
type.
|
||||
|
||||
```go
|
||||
func TranslateTextWithInterfaceClient(client TranslationClient, text string, targetLang string) (string, error) {
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
This allows a real `translate.Client` to be passed to the method in production
|
||||
and for a mock implementation to be passed in during testing. This pattern can
|
||||
be applied to any Go code, not just `cloud.google.com/go`. This is because
|
||||
interfaces in Go are implicitly satisfied. Structs in the client libraries can
|
||||
implicitly implement interfaces defined in your codebase. Let’s take a look at
|
||||
what it might look like to define a lightweight mock for the `TranslationClient`
|
||||
interface.
|
||||
|
||||
```go
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/googleapis/gax-go/v2"
|
||||
translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
|
||||
)
|
||||
|
||||
type mockClient struct{}
|
||||
|
||||
func (*mockClient) TranslateText(_ context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error) {
|
||||
resp := &translatepb.TranslateTextResponse{
|
||||
Translations: []*translatepb.Translation{
|
||||
&translatepb.Translation{
|
||||
TranslatedText: "Hello World",
|
||||
},
|
||||
},
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func TestTranslateTextWithAbstractClient(t *testing.T) {
|
||||
client := &mockClient{}
|
||||
text, err := TranslateTextWithInterfaceClient(client, "Hola Mundo", "en-US")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if text != "Hello World" {
|
||||
t.Fatalf("got %q, want Hello World", text)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
If you prefer to not write your own mocks there are mocking frameworks such as
|
||||
[golang/mock](https://github.com/golang/mock) which can generate mocks for you
|
||||
from an interface. As a word of caution though, try not to
|
||||
[overuse mocks](https://testing.googleblog.com/2013/05/testing-on-toilet-dont-overuse-mocks.html).
|
||||
|
||||
## Testing using emulators
|
||||
|
||||
Some of the client libraries provided in `cloud.google.com/go` support running
|
||||
against a service emulator. The concept is similar to that of using fakes,
|
||||
mentioned above, but the server is managed for you. You just need to start it up
|
||||
and instruct the client library to talk to the emulator by setting a service
|
||||
specific emulator environment variable. Current services/environment-variables
|
||||
are:
|
||||
|
||||
- bigtable: `BIGTABLE_EMULATOR_HOST`
|
||||
- datastore: `DATASTORE_EMULATOR_HOST`
|
||||
- firestore: `FIRESTORE_EMULATOR_HOST`
|
||||
- pubsub: `PUBSUB_EMULATOR_HOST`
|
||||
- spanner: `SPANNER_EMULATOR_HOST`
|
||||
- storage: `STORAGE_EMULATOR_HOST`
|
||||
- Although the storage client supports an emulator environment variable, there is no official emulator provided by gcloud.
|
||||
|
||||
For more information on emulators please refer to the
|
||||
[gcloud documentation](https://cloud.google.com/sdk/gcloud/reference/beta/emulators).
|
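The emulator list above tells you to set a service-specific environment variable but does not show the wiring. A minimal sketch for the storage case, assuming an emulator is already listening on localhost:9000 (the address, bucket, and project ID are placeholders):

```go
package main

import (
	"context"
	"log"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	// The storage client reads STORAGE_EMULATOR_HOST when it is constructed,
	// so the variable must be set before NewClient is called.
	os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000")

	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// All requests now go to the emulator instead of the real service.
	if err := client.Bucket("test-bucket").Create(ctx, "test-project", nil); err != nil {
		log.Fatal(err)
	}
}
```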
15 vendor/github.com/VictoriaMetrics/metrics/README.md generated vendored
|
@ -16,6 +16,9 @@
|
|||
* Allows exporting distinct metric sets via distinct endpoints. See [Set](http://godoc.org/github.com/VictoriaMetrics/metrics#Set).
|
||||
* Supports [easy-to-use histograms](http://godoc.org/github.com/VictoriaMetrics/metrics#Histogram), which just work without any tuning.
|
||||
Read more about VictoriaMetrics histograms at [this article](https://medium.com/@valyala/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350).
|
||||
* Can push metrics to VictoriaMetrics or to any other remote storage, which accepts metrics
|
||||
in [Prometheus text exposition format](https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format).
|
||||
See [these docs](http://godoc.org/github.com/VictoriaMetrics/metrics#InitPush).
|
||||
|
||||
|
||||
### Limitations
|
||||
|
@ -28,8 +31,8 @@
|
|||
```go
|
||||
import "github.com/VictoriaMetrics/metrics"
|
||||
|
||||
// Register various time series.
|
||||
// Time series name may contain labels in Prometheus format - see below.
|
||||
// Register various metrics.
|
||||
// Metric name may contain labels in Prometheus format - see below.
|
||||
var (
|
||||
// Register counter without labels.
|
||||
requestsTotal = metrics.NewCounter("requests_total")
|
||||
|
@ -64,6 +67,10 @@ func requestHandler() {
|
|||
http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
|
||||
metrics.WritePrometheus(w, true)
|
||||
})
|
||||
|
||||
// ... or push registered metrics every 10 seconds to http://victoria-metrics:8428/api/v1/import/prometheus
|
||||
// with the added `instance="foobar"` label to all the pushed metrics.
|
||||
metrics.InitPush("http://victoria-metrics:8428/api/v1/import/prometheus", 10*time.Second, `instance="foobar"`, true)
|
||||
```
|
||||
|
||||
See [docs](http://godoc.org/github.com/VictoriaMetrics/metrics) for more info.
|
||||
|
@ -86,8 +93,8 @@ Because the `github.com/prometheus/client_golang` is too complex and is hard to
|
|||
#### Why the `metrics.WritePrometheus` doesn't expose documentation for each metric?
|
||||
|
||||
Because this documentation is ignored by Prometheus. The documentation is for users.
|
||||
Just give meaningful names to the exported metrics or add comments in the source code
|
||||
or in other suitable place explaining each metric exposed from your application.
|
||||
Just give [meaningful names to the exported metrics](https://prometheus.io/docs/practices/naming/#metric-names)
|
||||
or add comments in the source code or in other suitable place explaining each metric exposed from your application.
|
||||
|
||||
|
||||
#### How to implement [CounterVec](https://godoc.org/github.com/prometheus/client_golang/prometheus#CounterVec) in `metrics`?
|
||||
|
|
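The FAQ entry above is cut off by the hunk context. As an illustrative sketch only (not the README's wording), per-label counters in this package are usually obtained on demand with GetOrCreateCounter, which plays the role of a CounterVec; the metric and label names below are placeholders:

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/metrics"
)

// trackRequest increments a per-path counter. GetOrCreateCounter lazily
// registers a counter for each distinct label combination, which is the
// closest equivalent to a CounterVec in this package.
func trackRequest(path string) {
	metrics.GetOrCreateCounter(fmt.Sprintf(`requests_total{path=%q}`, path)).Inc()
}

func main() {
	trackRequest("/foo")
	trackRequest("/bar")
}
```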
12 vendor/github.com/VictoriaMetrics/metrics/counter.go generated vendored
|
@ -11,9 +11,9 @@ import (
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned counter is safe to use from concurrent goroutines.
|
||||
func NewCounter(name string) *Counter {
|
||||
|
@ -65,9 +65,9 @@ func (c *Counter) marshalTo(prefix string, w io.Writer) {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned counter is safe to use from concurrent goroutines.
|
||||
//
|
||||
|
|
12 vendor/github.com/VictoriaMetrics/metrics/floatcounter.go generated vendored
|
@ -11,9 +11,9 @@ import (
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned counter is safe to use from concurrent goroutines.
|
||||
func NewFloatCounter(name string) *FloatCounter {
|
||||
|
@ -70,9 +70,9 @@ func (fc *FloatCounter) marshalTo(prefix string, w io.Writer) {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned FloatCounter is safe to use from concurrent goroutines.
|
||||
//
|
||||
|
|
12 vendor/github.com/VictoriaMetrics/metrics/gauge.go generated vendored
|
@ -11,9 +11,9 @@ import (
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// f must be safe for concurrent calls.
|
||||
//
|
||||
|
@ -53,9 +53,9 @@ func (g *Gauge) marshalTo(prefix string, w io.Writer) {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned gauge is safe to use from concurrent goroutines.
|
||||
//
|
||||
|
|
24 vendor/github.com/VictoriaMetrics/metrics/histogram.go generated vendored
|
@ -25,20 +25,20 @@ var bucketMultiplier = math.Pow(10, 1.0/bucketsPerDecimal)
|
|||
// Each bucket contains a counter for values in the given range.
|
||||
// Each non-empty bucket is exposed via the following metric:
|
||||
//
|
||||
// <metric_name>_bucket{<optional_tags>,vmrange="<start>...<end>"} <counter>
|
||||
// <metric_name>_bucket{<optional_tags>,vmrange="<start>...<end>"} <counter>
|
||||
//
|
||||
// Where:
|
||||
//
|
||||
// - <metric_name> is the metric name passed to NewHistogram
|
||||
// - <optional_tags> is optional tags for the <metric_name>, which are passed to NewHistogram
|
||||
// - <start> and <end> - start and end values for the given bucket
|
||||
// - <counter> - the number of hits to the given bucket during Update* calls
|
||||
// - <metric_name> is the metric name passed to NewHistogram
|
||||
// - <optional_tags> is optional tags for the <metric_name>, which are passed to NewHistogram
|
||||
// - <start> and <end> - start and end values for the given bucket
|
||||
// - <counter> - the number of hits to the given bucket during Update* calls
|
||||
//
|
||||
// Histogram buckets can be converted to Prometheus-like buckets with `le` labels
|
||||
// with `prometheus_buckets(<metric_name>_bucket)` function from PromQL extensions in VictoriaMetrics.
|
||||
// (see https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/MetricsQL ):
|
||||
//
|
||||
// prometheus_buckets(request_duration_bucket)
|
||||
// prometheus_buckets(request_duration_bucket)
|
||||
//
|
||||
// Time series produced by the Histogram have better compression ratio comparing to
|
||||
// Prometheus histogram buckets with `le` labels, since they don't include counters
|
||||
|
@ -143,9 +143,9 @@ func (h *Histogram) VisitNonZeroBuckets(f func(vmrange string, count uint64)) {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned histogram is safe to use from concurrent goroutines.
|
||||
func NewHistogram(name string) *Histogram {
|
||||
|
@ -159,9 +159,9 @@ func NewHistogram(name string) *Histogram {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned histogram is safe to use from concurrent goroutines.
|
||||
//
|
||||
|
|
187 vendor/github.com/VictoriaMetrics/metrics/metrics.go generated vendored
|
@ -5,20 +5,24 @@
|
|||
//
|
||||
// Usage:
|
||||
//
|
||||
// 1. Register the required metrics via New* functions.
|
||||
// 2. Expose them to `/metrics` page via WritePrometheus.
|
||||
// 3. Update the registered metrics during application lifetime.
|
||||
// 1. Register the required metrics via New* functions.
|
||||
// 2. Expose them to `/metrics` page via WritePrometheus.
|
||||
// 3. Update the registered metrics during application lifetime.
|
||||
//
|
||||
// The package has been extracted from https://victoriametrics.com/
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sort"
|
||||
"sync"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type namedMetric struct {
|
||||
name string
|
||||
metric metric
|
||||
isAux bool
|
||||
}
|
||||
|
||||
type metric interface {
|
||||
|
@ -27,19 +31,59 @@ type metric interface {
|
|||
|
||||
var defaultSet = NewSet()
|
||||
|
||||
// WritePrometheus writes all the registered metrics in Prometheus format to w.
|
||||
func init() {
|
||||
RegisterSet(defaultSet)
|
||||
}
|
||||
|
||||
var (
|
||||
registeredSets = make(map[*Set]struct{})
|
||||
registeredSetsLock sync.Mutex
|
||||
)
|
||||
|
||||
// RegisterSet registers the given set s for metrics export via global WritePrometheus() call.
|
||||
//
|
||||
// See also UnregisterSet.
|
||||
func RegisterSet(s *Set) {
|
||||
registeredSetsLock.Lock()
|
||||
registeredSets[s] = struct{}{}
|
||||
registeredSetsLock.Unlock()
|
||||
}
|
||||
|
||||
// UnregisterSet stops exporting metrics for the given s via global WritePrometheus() call.
|
||||
//
|
||||
// Call s.UnregisterAllMetrics() after unregistering s if it is no longer used.
|
||||
func UnregisterSet(s *Set) {
|
||||
registeredSetsLock.Lock()
|
||||
delete(registeredSets, s)
|
||||
registeredSetsLock.Unlock()
|
||||
}
|
||||
|
||||
// WritePrometheus writes all the metrics from default set and all the registered sets in Prometheus format to w.
|
||||
//
|
||||
// Additional sets can be registered via RegisterSet() call.
|
||||
//
|
||||
// If exposeProcessMetrics is true, then various `go_*` and `process_*` metrics
|
||||
// are exposed for the current process.
|
||||
//
|
||||
// The WritePrometheus func is usually called inside "/metrics" handler:
|
||||
//
|
||||
// http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
|
||||
// metrics.WritePrometheus(w, true)
|
||||
// })
|
||||
//
|
||||
// http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
|
||||
// metrics.WritePrometheus(w, true)
|
||||
// })
|
||||
func WritePrometheus(w io.Writer, exposeProcessMetrics bool) {
|
||||
defaultSet.WritePrometheus(w)
|
||||
registeredSetsLock.Lock()
|
||||
sets := make([]*Set, 0, len(registeredSets))
|
||||
for s := range registeredSets {
|
||||
sets = append(sets, s)
|
||||
}
|
||||
registeredSetsLock.Unlock()
|
||||
|
||||
sort.Slice(sets, func(i, j int) bool {
|
||||
return uintptr(unsafe.Pointer(sets[i])) < uintptr(unsafe.Pointer(sets[j]))
|
||||
})
|
||||
for _, s := range sets {
|
||||
s.WritePrometheus(w)
|
||||
}
|
||||
if exposeProcessMetrics {
|
||||
WriteProcessMetrics(w)
|
||||
}
|
||||
|
@ -50,55 +94,87 @@ func WritePrometheus(w io.Writer, exposeProcessMetrics bool) {
|
|||
// The following `go_*` and `process_*` metrics are exposed for the currently
|
||||
// running process. Below is a short description for the exposed `process_*` metrics:
|
||||
//
|
||||
// - process_cpu_seconds_system_total - CPU time spent in syscalls
|
||||
// - process_cpu_seconds_user_total - CPU time spent in userspace
|
||||
// - process_cpu_seconds_total - CPU time spent by the process
|
||||
// - process_major_pagefaults_total - page faults resulted in disk IO
|
||||
// - process_minor_pagefaults_total - page faults resolved without disk IO
|
||||
// - process_resident_memory_bytes - recently accessed memory (aka RSS or resident memory)
|
||||
// - process_resident_memory_peak_bytes - the maximum RSS memory usage
|
||||
// - process_resident_memory_anon_bytes - RSS for memory-mapped files
|
||||
// - process_resident_memory_file_bytes - RSS for memory allocated by the process
|
||||
// - process_resident_memory_shared_bytes - RSS for memory shared between multiple processes
|
||||
// - process_virtual_memory_bytes - virtual memory usage
|
||||
// - process_virtual_memory_peak_bytes - the maximum virtual memory usage
|
||||
// - process_num_threads - the number of threads
|
||||
// - process_start_time_seconds - process start time as unix timestamp
|
||||
// - process_cpu_seconds_system_total - CPU time spent in syscalls
|
||||
//
|
||||
// - process_io_read_bytes_total - the number of bytes read via syscalls
|
||||
// - process_io_written_bytes_total - the number of bytes written via syscalls
|
||||
// - process_io_read_syscalls_total - the number of read syscalls
|
||||
// - process_io_write_syscalls_total - the number of write syscalls
|
||||
// - process_io_storage_read_bytes_total - the number of bytes actually read from disk
|
||||
// - process_io_storage_written_bytes_total - the number of bytes actually written to disk
|
||||
// - process_cpu_seconds_user_total - CPU time spent in userspace
|
||||
//
|
||||
// - go_memstats_alloc_bytes - memory usage for Go objects in the heap
|
||||
// - go_memstats_alloc_bytes_total - the cumulative counter for total size of allocated Go objects
|
||||
// - go_memstats_frees_total - the cumulative counter for number of freed Go objects
|
||||
// - go_memstats_gc_cpu_fraction - the fraction of CPU spent in Go garbage collector
|
||||
// - go_memstats_gc_sys_bytes - the size of Go garbage collector metadata
|
||||
// - go_memstats_heap_alloc_bytes - the same as go_memstats_alloc_bytes
|
||||
// - go_memstats_heap_idle_bytes - idle memory ready for new Go object allocations
|
||||
// - go_memstats_heap_objects - the number of Go objects in the heap
|
||||
// - go_memstats_heap_sys_bytes - memory requested for Go objects from the OS
|
||||
// - go_memstats_mallocs_total - the number of allocations for Go objects
|
||||
// - go_memstats_next_gc_bytes - the target heap size when the next garbage collection should start
|
||||
// - go_memstats_stack_inuse_bytes - memory used for goroutine stacks
|
||||
// - go_memstats_stack_sys_bytes - memory requested from the OS for goroutine stacks
|
||||
// - go_memstats_sys_bytes - memory requested by Go runtime from the OS
|
||||
// - process_cpu_seconds_total - CPU time spent by the process
|
||||
//
|
||||
// - process_major_pagefaults_total - page faults resulted in disk IO
|
||||
//
|
||||
// - process_minor_pagefaults_total - page faults resolved without disk IO
|
||||
//
|
||||
// - process_resident_memory_bytes - recently accessed memory (aka RSS or resident memory)
|
||||
//
|
||||
// - process_resident_memory_peak_bytes - the maximum RSS memory usage
|
||||
//
|
||||
// - process_resident_memory_anon_bytes - RSS for memory-mapped files
|
||||
//
|
||||
// - process_resident_memory_file_bytes - RSS for memory allocated by the process
|
||||
//
|
||||
// - process_resident_memory_shared_bytes - RSS for memory shared between multiple processes
|
||||
//
|
||||
// - process_virtual_memory_bytes - virtual memory usage
|
||||
//
|
||||
// - process_virtual_memory_peak_bytes - the maximum virtual memory usage
|
||||
//
|
||||
// - process_num_threads - the number of threads
|
||||
//
|
||||
// - process_start_time_seconds - process start time as unix timestamp
|
||||
//
|
||||
// - process_io_read_bytes_total - the number of bytes read via syscalls
|
||||
//
|
||||
// - process_io_written_bytes_total - the number of bytes written via syscalls
|
||||
//
|
||||
// - process_io_read_syscalls_total - the number of read syscalls
|
||||
//
|
||||
// - process_io_write_syscalls_total - the number of write syscalls
|
||||
//
|
||||
// - process_io_storage_read_bytes_total - the number of bytes actually read from disk
|
||||
//
|
||||
// - process_io_storage_written_bytes_total - the number of bytes actually written to disk
|
||||
//
|
||||
// - go_memstats_alloc_bytes - memory usage for Go objects in the heap
|
||||
//
|
||||
// - go_memstats_alloc_bytes_total - the cumulative counter for total size of allocated Go objects
|
||||
//
|
||||
// - go_memstats_frees_total - the cumulative counter for number of freed Go objects
|
||||
//
|
||||
// - go_memstats_gc_cpu_fraction - the fraction of CPU spent in Go garbage collector
|
||||
//
|
||||
// - go_memstats_gc_sys_bytes - the size of Go garbage collector metadata
|
||||
//
|
||||
// - go_memstats_heap_alloc_bytes - the same as go_memstats_alloc_bytes
|
||||
//
|
||||
// - go_memstats_heap_idle_bytes - idle memory ready for new Go object allocations
|
||||
//
|
||||
// - go_memstats_heap_objects - the number of Go objects in the heap
|
||||
//
|
||||
// - go_memstats_heap_sys_bytes - memory requested for Go objects from the OS
|
||||
//
|
||||
// - go_memstats_mallocs_total - the number of allocations for Go objects
|
||||
//
|
||||
// - go_memstats_next_gc_bytes - the target heap size when the next garbage collection should start
|
||||
//
|
||||
// - go_memstats_stack_inuse_bytes - memory used for goroutine stacks
|
||||
//
|
||||
// - go_memstats_stack_sys_bytes - memory requested from the OS for goroutine stacks
|
||||
//
|
||||
// - go_memstats_sys_bytes - memory requested by Go runtime from the OS
|
||||
//
|
||||
// The WriteProcessMetrics func is usually called in combination with writing Set metrics
|
||||
// inside "/metrics" handler:
|
||||
//
|
||||
// http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
|
||||
// mySet.WritePrometheus(w)
|
||||
// metrics.WriteProcessMetrics(w)
|
||||
// })
|
||||
// http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
|
||||
// mySet.WritePrometheus(w)
|
||||
// metrics.WriteProcessMetrics(w)
|
||||
// })
|
||||
//
|
||||
// See also WriteFDMetrics.
|
||||
func WriteProcessMetrics(w io.Writer) {
|
||||
writeGoMetrics(w)
|
||||
writeProcessMetrics(w)
|
||||
writePushMetrics(w)
|
||||
}
|
||||
|
||||
// WriteFDMetrics writes `process_max_fds` and `process_open_fds` metrics to w.
|
||||
|
@ -107,6 +183,23 @@ func WriteFDMetrics(w io.Writer) {
|
|||
}
|
||||
|
||||
// UnregisterMetric removes metric with the given name from default set.
|
||||
//
|
||||
// See also UnregisterAllMetrics.
|
||||
func UnregisterMetric(name string) bool {
|
||||
return defaultSet.UnregisterMetric(name)
|
||||
}
|
||||
|
||||
// UnregisterAllMetrics unregisters all the metrics from default set.
|
||||
func UnregisterAllMetrics() {
|
||||
defaultSet.UnregisterAllMetrics()
|
||||
}
|
||||
|
||||
// ListMetricNames returns sorted list of all the metric names from default set.
|
||||
func ListMetricNames() []string {
|
||||
return defaultSet.ListMetricNames()
|
||||
}
|
||||
|
||||
// GetDefaultSet returns the default metrics set.
|
||||
func GetDefaultSet() *Set {
|
||||
return defaultSet
|
||||
}
|
||||
|
|
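The RegisterSet/UnregisterSet additions in this hunk change how custom metric sets reach the global exposition endpoint. A minimal usage sketch based on the new doc comments (the listen address and metric name are placeholders):

```go
package main

import (
	"net/http"

	"github.com/VictoriaMetrics/metrics"
)

func main() {
	// Metrics in a custom Set are exported by the global WritePrometheus
	// once the set is registered.
	s := metrics.NewSet()
	s.NewCounter("jobs_started_total").Inc()
	metrics.RegisterSet(s)

	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		// Writes the default set, all registered sets (including s),
		// and process metrics.
		metrics.WritePrometheus(w, true)
	})
	http.ListenAndServe(":8080", nil)

	// When s is no longer needed:
	// metrics.UnregisterSet(s)
	// s.UnregisterAllMetrics()
}
```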
18 vendor/github.com/VictoriaMetrics/metrics/process_metrics_linux.go generated vendored
|
@ -45,13 +45,13 @@ func writeProcessMetrics(w io.Writer) {
|
|||
statFilepath := "/proc/self/stat"
|
||||
data, err := ioutil.ReadFile(statFilepath)
|
||||
if err != nil {
|
||||
log.Printf("ERROR: cannot open %s: %s", statFilepath, err)
|
||||
log.Printf("ERROR: metrics: cannot open %s: %s", statFilepath, err)
|
||||
return
|
||||
}
|
||||
// Search for the end of command.
|
||||
n := bytes.LastIndex(data, []byte(") "))
|
||||
if n < 0 {
|
||||
log.Printf("ERROR: cannot find command in parentheses in %q read from %s", data, statFilepath)
|
||||
log.Printf("ERROR: metrics: cannot find command in parentheses in %q read from %s", data, statFilepath)
|
||||
return
|
||||
}
|
||||
data = data[n+2:]
|
||||
|
@ -62,7 +62,7 @@ func writeProcessMetrics(w io.Writer) {
|
|||
&p.State, &p.Ppid, &p.Pgrp, &p.Session, &p.TtyNr, &p.Tpgid, &p.Flags, &p.Minflt, &p.Cminflt, &p.Majflt, &p.Cmajflt,
|
||||
&p.Utime, &p.Stime, &p.Cutime, &p.Cstime, &p.Priority, &p.Nice, &p.NumThreads, &p.ItrealValue, &p.Starttime, &p.Vsize, &p.Rss)
|
||||
if err != nil {
|
||||
log.Printf("ERROR: cannot parse %q read from %s: %s", data, statFilepath, err)
|
||||
log.Printf("ERROR: metrics: cannot parse %q read from %s: %s", data, statFilepath, err)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -89,17 +89,17 @@ func writeIOMetrics(w io.Writer) {
|
|||
ioFilepath := "/proc/self/io"
|
||||
data, err := ioutil.ReadFile(ioFilepath)
|
||||
if err != nil {
|
||||
log.Printf("ERROR: cannot open %q: %s", ioFilepath, err)
|
||||
log.Printf("ERROR: metrics: cannot open %q: %s", ioFilepath, err)
|
||||
}
|
||||
getInt := func(s string) int64 {
|
||||
n := strings.IndexByte(s, ' ')
|
||||
if n < 0 {
|
||||
log.Printf("ERROR: cannot find whitespace in %q at %q", s, ioFilepath)
|
||||
log.Printf("ERROR: metrics: cannot find whitespace in %q at %q", s, ioFilepath)
|
||||
return 0
|
||||
}
|
||||
v, err := strconv.ParseInt(s[n+1:], 10, 64)
|
||||
if err != nil {
|
||||
log.Printf("ERROR: cannot parse %q at %q: %s", s, ioFilepath, err)
|
||||
log.Printf("ERROR: metrics: cannot parse %q at %q: %s", s, ioFilepath, err)
|
||||
return 0
|
||||
}
|
||||
return v
|
||||
|
@ -137,12 +137,12 @@ var startTimeSeconds = time.Now().Unix()
|
|||
func writeFDMetrics(w io.Writer) {
|
||||
totalOpenFDs, err := getOpenFDsCount("/proc/self/fd")
|
||||
if err != nil {
|
||||
log.Printf("ERROR: cannot determine open file descriptors count: %s", err)
|
||||
log.Printf("ERROR: metrics: cannot determine open file descriptors count: %s", err)
|
||||
return
|
||||
}
|
||||
maxOpenFDs, err := getMaxFilesLimit("/proc/self/limits")
|
||||
if err != nil {
|
||||
log.Printf("ERROR: cannot determine the limit on open file descritors: %s", err)
|
||||
log.Printf("ERROR: metrics: cannot determine the limit on open file descritors: %s", err)
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(w, "process_max_fds %d\n", maxOpenFDs)
|
||||
|
@ -211,7 +211,7 @@ type memStats struct {
|
|||
func writeProcessMemMetrics(w io.Writer) {
|
||||
ms, err := getMemStats("/proc/self/status")
|
||||
if err != nil {
|
||||
log.Printf("ERROR: cannot determine memory status: %s", err)
|
||||
log.Printf("ERROR: metrics: cannot determine memory status: %s", err)
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(w, "process_virtual_memory_peak_bytes %d\n", ms.vmPeak)
|
||||
|
|
1
vendor/github.com/VictoriaMetrics/metrics/process_metrics_other.go
generated
vendored
|
@ -1,3 +1,4 @@
|
|||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
package metrics
|
||||
|
|
227
vendor/github.com/VictoriaMetrics/metrics/push.go
generated
vendored
Normal file
|
@ -0,0 +1,227 @@
|
|||
package metrics
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"compress/gzip"
|
||||
)
|
||||
|
||||
// InitPushProcessMetrics sets up periodic push for 'process_*' metrics to the given pushURL with the given interval.
|
||||
//
|
||||
// extraLabels may contain a comma-separated list of `label="value"` labels, which will be added
|
||||
// to all the metrics before pushing them to pushURL.
|
||||
//
|
||||
// The metrics are pushed to pushURL in Prometheus text exposition format.
|
||||
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
|
||||
//
|
||||
// It is recommended to push metrics to the /api/v1/import/prometheus endpoint according to
|
||||
// https://docs.victoriametrics.com/#how-to-import-data-in-prometheus-exposition-format
|
||||
//
|
||||
// It is OK calling InitPushProcessMetrics multiple times with different pushURL -
|
||||
// in this case metrics are pushed to all the provided pushURL urls.
|
||||
func InitPushProcessMetrics(pushURL string, interval time.Duration, extraLabels string) error {
|
||||
writeMetrics := func(w io.Writer) {
|
||||
WriteProcessMetrics(w)
|
||||
}
|
||||
return InitPushExt(pushURL, interval, extraLabels, writeMetrics)
|
||||
}
|
||||
|
||||
// InitPush sets up periodic push for globally registered metrics to the given pushURL with the given interval.
|
||||
//
|
||||
// extraLabels may contain a comma-separated list of `label="value"` labels, which will be added
|
||||
// to all the metrics before pushing them to pushURL.
|
||||
//
|
||||
// If pushProcessMetrics is set to true, then 'process_*' metrics are also pushed to pushURL.
|
||||
//
|
||||
// The metrics are pushed to pushURL in Prometheus text exposition format.
|
||||
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
|
||||
//
|
||||
// It is recommended to push metrics to the /api/v1/import/prometheus endpoint according to
|
||||
// https://docs.victoriametrics.com/#how-to-import-data-in-prometheus-exposition-format
|
||||
//
|
||||
// It is OK calling InitPush multiple times with different pushURL -
|
||||
// in this case metrics are pushed to all the provided pushURL urls.
|
||||
func InitPush(pushURL string, interval time.Duration, extraLabels string, pushProcessMetrics bool) error {
|
||||
writeMetrics := func(w io.Writer) {
|
||||
WritePrometheus(w, pushProcessMetrics)
|
||||
}
|
||||
return InitPushExt(pushURL, interval, extraLabels, writeMetrics)
|
||||
}
|
||||
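A hedged sketch of how the package-level InitPush above could be wired up from an application. The endpoint host, interval and extra labels are made-up values, not anything prescribed by the diff:

    package main

    import (
        "log"
        "time"

        "github.com/VictoriaMetrics/metrics"
    )

    func main() {
        // Push all globally registered metrics (plus process_* metrics,
        // because the last argument is true) every 10 seconds.
        err := metrics.InitPush(
            "http://victoria-metrics:8428/api/v1/import/prometheus",
            10*time.Second,
            `instance="my-app",job="my-job"`,
            true,
        )
        if err != nil {
            log.Fatalf("cannot initialize metrics push: %s", err)
        }

        metrics.GetOrCreateCounter("jobs_processed_total").Inc()

        // Block so the background pusher keeps running.
        select {}
    }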
|
||||
// InitPush sets up periodic push for metrics from s to the given pushURL with the given interval.
|
||||
//
|
||||
// extraLabels may contain a comma-separated list of `label="value"` labels, which will be added
|
||||
// to all the metrics before pushing them to pushURL.
|
||||
//
|
||||
// The metrics are pushed to pushURL in Prometheus text exposition format.
|
||||
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
|
||||
//
|
||||
// It is recommended to push metrics to the /api/v1/import/prometheus endpoint according to
|
||||
// https://docs.victoriametrics.com/#how-to-import-data-in-prometheus-exposition-format
|
||||
//
|
||||
// It is OK calling InitPush multiple times with different pushURL -
|
||||
// in this case metrics are pushed to all the provided pushURL urls.
|
||||
func (s *Set) InitPush(pushURL string, interval time.Duration, extraLabels string) error {
|
||||
writeMetrics := func(w io.Writer) {
|
||||
s.WritePrometheus(w)
|
||||
}
|
||||
return InitPushExt(pushURL, interval, extraLabels, writeMetrics)
|
||||
}
|
||||
|
||||
// InitPushExt sets up periodic push for metrics obtained by calling writeMetrics with the given interval.
|
||||
//
|
||||
// extraLabels may contain a comma-separated list of `label="value"` labels, which will be added
|
||||
// to all the metrics before pushing them to pushURL.
|
||||
//
|
||||
// The writeMetrics callback must write metrics to w in Prometheus text exposition format without timestamps and trailing comments.
|
||||
// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
|
||||
//
|
||||
// It is recommended to push metrics to the /api/v1/import/prometheus endpoint according to
|
||||
// https://docs.victoriametrics.com/#how-to-import-data-in-prometheus-exposition-format
|
||||
//
|
||||
// It is OK calling InitPushExt multiple times with different pushURL -
|
||||
// in this case metrics are pushed to all the provided pushURL urls.
|
||||
//
|
||||
// It is OK calling InitPushExt multiple times with different writeMetrics -
|
||||
// in this case all the metrics generated by writeMetrics callbacks are written to pushURL.
|
||||
func InitPushExt(pushURL string, interval time.Duration, extraLabels string, writeMetrics func(w io.Writer)) error {
|
||||
if interval <= 0 {
|
||||
return fmt.Errorf("interval must be positive; got %s", interval)
|
||||
}
|
||||
if err := validateTags(extraLabels); err != nil {
|
||||
return fmt.Errorf("invalid extraLabels=%q: %w", extraLabels, err)
|
||||
}
|
||||
pu, err := url.Parse(pushURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot parse pushURL=%q: %w", pushURL, err)
|
||||
}
|
||||
if pu.Scheme != "http" && pu.Scheme != "https" {
|
||||
return fmt.Errorf("unsupported scheme in pushURL=%q; expecting 'http' or 'https'", pushURL)
|
||||
}
|
||||
if pu.Host == "" {
|
||||
return fmt.Errorf("missing host in pushURL=%q", pushURL)
|
||||
}
|
||||
pushURLRedacted := pu.Redacted()
|
||||
c := &http.Client{
|
||||
Timeout: interval,
|
||||
}
|
||||
pushesTotal := pushMetrics.GetOrCreateCounter(fmt.Sprintf(`metrics_push_total{url=%q}`, pushURLRedacted))
|
||||
pushErrorsTotal := pushMetrics.GetOrCreateCounter(fmt.Sprintf(`metrics_push_errors_total{url=%q}`, pushURLRedacted))
|
||||
bytesPushedTotal := pushMetrics.GetOrCreateCounter(fmt.Sprintf(`metrics_push_bytes_pushed_total{url=%q}`, pushURLRedacted))
|
||||
pushDuration := pushMetrics.GetOrCreateHistogram(fmt.Sprintf(`metrics_push_duration_seconds{url=%q}`, pushURLRedacted))
|
||||
pushBlockSize := pushMetrics.GetOrCreateHistogram(fmt.Sprintf(`metrics_push_block_size_bytes{url=%q}`, pushURLRedacted))
|
||||
pushMetrics.GetOrCreateFloatCounter(fmt.Sprintf(`metrics_push_interval_seconds{url=%q}`, pushURLRedacted)).Set(interval.Seconds())
|
||||
go func() {
|
||||
ticker := time.NewTicker(interval)
|
||||
var bb bytes.Buffer
|
||||
var tmpBuf []byte
|
||||
zw := gzip.NewWriter(&bb)
|
||||
for range ticker.C {
|
||||
bb.Reset()
|
||||
writeMetrics(&bb)
|
||||
if len(extraLabels) > 0 {
|
||||
tmpBuf = addExtraLabels(tmpBuf[:0], bb.Bytes(), extraLabels)
|
||||
bb.Reset()
|
||||
if _, err := bb.Write(tmpBuf); err != nil {
|
||||
panic(fmt.Errorf("BUG: cannot write %d bytes to bytes.Buffer: %s", len(tmpBuf), err))
|
||||
}
|
||||
}
|
||||
tmpBuf = append(tmpBuf[:0], bb.Bytes()...)
|
||||
bb.Reset()
|
||||
zw.Reset(&bb)
|
||||
if _, err := zw.Write(tmpBuf); err != nil {
|
||||
panic(fmt.Errorf("BUG: cannot write %d bytes to gzip writer: %s", len(tmpBuf), err))
|
||||
}
|
||||
if err := zw.Close(); err != nil {
|
||||
panic(fmt.Errorf("BUG: cannot flush metrics to gzip writer: %s", err))
|
||||
}
|
||||
pushesTotal.Inc()
|
||||
blockLen := bb.Len()
|
||||
bytesPushedTotal.Add(blockLen)
|
||||
pushBlockSize.Update(float64(blockLen))
|
||||
req, err := http.NewRequest("GET", pushURL, &bb)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("BUG: metrics.push: cannot initialize request for metrics push to %q: %w", pushURLRedacted, err))
|
||||
}
|
||||
req.Header.Set("Content-Type", "text/plain")
|
||||
req.Header.Set("Content-Encoding", "gzip")
|
||||
startTime := time.Now()
|
||||
resp, err := c.Do(req)
|
||||
pushDuration.UpdateDuration(startTime)
|
||||
if err != nil {
|
||||
log.Printf("ERROR: metrics.push: cannot push metrics to %q: %s", pushURLRedacted, err)
|
||||
pushErrorsTotal.Inc()
|
||||
continue
|
||||
}
|
||||
if resp.StatusCode/100 != 2 {
|
||||
body, _ := ioutil.ReadAll(resp.Body)
|
||||
_ = resp.Body.Close()
|
||||
log.Printf("ERROR: metrics.push: unexpected status code in response from %q: %d; expecting 2xx; response body: %q",
|
||||
pushURLRedacted, resp.StatusCode, body)
|
||||
pushErrorsTotal.Inc()
|
||||
continue
|
||||
}
|
||||
_ = resp.Body.Close()
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
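For the callback-based InitPushExt variant above, a small sketch assuming a hypothetical appSet and push endpoint; only whatever the writeMetrics callback writes gets pushed:

    package main

    import (
        "io"
        "log"
        "time"

        "github.com/VictoriaMetrics/metrics"
    )

    func main() {
        appSet := metrics.NewSet()
        appSet.GetOrCreateCounter("app_events_total").Inc()

        // The callback decides exactly what is pushed: here the appSet
        // metrics plus one hand-written exposition line.
        writeMetrics := func(w io.Writer) {
            appSet.WritePrometheus(w)
            io.WriteString(w, "app_up 1\n")
        }
        err := metrics.InitPushExt("https://push.example.com/api/v1/import/prometheus",
            30*time.Second, `job="app"`, writeMetrics)
        if err != nil {
            log.Fatalf("cannot initialize metrics push: %s", err)
        }
        select {}
    }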
|
||||
var pushMetrics = NewSet()
|
||||
|
||||
func writePushMetrics(w io.Writer) {
|
||||
pushMetrics.WritePrometheus(w)
|
||||
}
|
||||
|
||||
func addExtraLabels(dst, src []byte, extraLabels string) []byte {
|
||||
for len(src) > 0 {
|
||||
var line []byte
|
||||
n := bytes.IndexByte(src, '\n')
|
||||
if n >= 0 {
|
||||
line = src[:n]
|
||||
src = src[n+1:]
|
||||
} else {
|
||||
line = src
|
||||
src = nil
|
||||
}
|
||||
line = bytes.TrimSpace(line)
|
||||
if len(line) == 0 {
|
||||
// Skip empty lines
|
||||
continue
|
||||
}
|
||||
if bytes.HasPrefix(line, bashBytes) {
|
||||
// Copy comments as is
|
||||
dst = append(dst, line...)
|
||||
dst = append(dst, '\n')
|
||||
continue
|
||||
}
|
||||
n = bytes.IndexByte(line, '{')
|
||||
if n >= 0 {
|
||||
dst = append(dst, line[:n+1]...)
|
||||
dst = append(dst, extraLabels...)
|
||||
dst = append(dst, ',')
|
||||
dst = append(dst, line[n+1:]...)
|
||||
} else {
|
||||
n = bytes.LastIndexByte(line, ' ')
|
||||
if n < 0 {
|
||||
panic(fmt.Errorf("BUG: missing whitespace between metric name and metric value in Prometheus text exposition line %q", line))
|
||||
}
|
||||
dst = append(dst, line[:n]...)
|
||||
dst = append(dst, '{')
|
||||
dst = append(dst, extraLabels...)
|
||||
dst = append(dst, '}')
|
||||
dst = append(dst, line[n:]...)
|
||||
}
|
||||
dst = append(dst, '\n')
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
var bashBytes = []byte("#")
|
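To make the label-injection behaviour above easier to follow, here is a simplified, standalone re-implementation of the core idea for a single exposition line. It is a sketch only: it ignores the comment lines and corner cases that addExtraLabels itself handles:

    package main

    import (
        "fmt"
        "strings"
    )

    // injectLabels mimics what addExtraLabels does to one metric line:
    // extra labels are spliced into an existing {...} block, or a new
    // block is created right before the value.
    func injectLabels(line, extraLabels string) string {
        if i := strings.IndexByte(line, '{'); i >= 0 {
            return line[:i+1] + extraLabels + "," + line[i+1:]
        }
        i := strings.LastIndexByte(line, ' ')
        return line[:i] + "{" + extraLabels + "}" + line[i:]
    }

    func main() {
        fmt.Println(injectLabels(`foo 42`, `instance="app1"`))            // foo{instance="app1"} 42
        fmt.Println(injectLabels(`foo{bar="baz"} 42`, `instance="app1"`)) // foo{instance="app1",bar="baz"} 42
    }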
125
vendor/github.com/VictoriaMetrics/metrics/set.go
generated
vendored
|
@ -22,6 +22,8 @@ type Set struct {
|
|||
}
|
||||
|
||||
// NewSet creates new set of metrics.
|
||||
//
|
||||
// Pass the set to RegisterSet() function in order to export its metrics via global WritePrometheus() call.
|
||||
func NewSet() *Set {
|
||||
return &Set{
|
||||
m: make(map[string]*namedMetric),
|
||||
|
@ -58,9 +60,9 @@ func (s *Set) WritePrometheus(w io.Writer) {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned histogram is safe to use from concurrent goroutines.
|
||||
func (s *Set) NewHistogram(name string) *Histogram {
|
||||
|
@ -75,9 +77,9 @@ func (s *Set) NewHistogram(name string) *Histogram {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned histogram is safe to use from concurrent goroutines.
|
||||
//
|
||||
|
@ -116,9 +118,9 @@ func (s *Set) GetOrCreateHistogram(name string) *Histogram {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned counter is safe to use from concurrent goroutines.
|
||||
func (s *Set) NewCounter(name string) *Counter {
|
||||
|
@ -133,9 +135,9 @@ func (s *Set) NewCounter(name string) *Counter {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned counter is safe to use from concurrent goroutines.
|
||||
//
|
||||
|
@ -174,9 +176,9 @@ func (s *Set) GetOrCreateCounter(name string) *Counter {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned FloatCounter is safe to use from concurrent goroutines.
|
||||
func (s *Set) NewFloatCounter(name string) *FloatCounter {
|
||||
|
@ -191,9 +193,9 @@ func (s *Set) NewFloatCounter(name string) *FloatCounter {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned FloatCounter is safe to use from concurrent goroutines.
|
||||
//
|
||||
|
@ -233,9 +235,9 @@ func (s *Set) GetOrCreateFloatCounter(name string) *FloatCounter {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// f must be safe for concurrent calls.
|
||||
//
|
||||
|
@ -257,9 +259,9 @@ func (s *Set) NewGauge(name string, f func() float64) *Gauge {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned gauge is safe to use from concurrent goroutines.
|
||||
//
|
||||
|
@ -303,9 +305,9 @@ func (s *Set) GetOrCreateGauge(name string, f func() float64) *Gauge {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned summary is safe to use from concurrent goroutines.
|
||||
func (s *Set) NewSummary(name string) *Summary {
|
||||
|
@ -318,9 +320,9 @@ func (s *Set) NewSummary(name string) *Summary {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned summary is safe to use from concurrent goroutines.
|
||||
func (s *Set) NewSummaryExt(name string, window time.Duration, quantiles []float64) *Summary {
|
||||
|
@ -334,7 +336,7 @@ func (s *Set) NewSummaryExt(name string, window time.Duration, quantiles []float
|
|||
// checks in tests
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.mustRegisterLocked(name, sm)
|
||||
s.mustRegisterLocked(name, sm, false)
|
||||
registerSummaryLocked(sm)
|
||||
s.registerSummaryQuantilesLocked(name, sm)
|
||||
s.summaries = append(s.summaries, sm)
|
||||
|
@ -347,9 +349,9 @@ func (s *Set) NewSummaryExt(name string, window time.Duration, quantiles []float
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned summary is safe to use from concurrent goroutines.
|
||||
//
|
||||
|
@ -365,9 +367,9 @@ func (s *Set) GetOrCreateSummary(name string) *Summary {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned summary is safe to use from concurrent goroutines.
|
||||
//
|
||||
|
@ -418,7 +420,7 @@ func (s *Set) registerSummaryQuantilesLocked(name string, sm *Summary) {
|
|||
sm: sm,
|
||||
idx: i,
|
||||
}
|
||||
s.mustRegisterLocked(quantileValueName, qv)
|
||||
s.mustRegisterLocked(quantileValueName, qv, true)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -430,18 +432,19 @@ func (s *Set) registerMetric(name string, m metric) {
|
|||
// defer will unlock in case of panic
|
||||
// checks in test
|
||||
defer s.mu.Unlock()
|
||||
s.mustRegisterLocked(name, m)
|
||||
s.mustRegisterLocked(name, m, false)
|
||||
}
|
||||
|
||||
// mustRegisterLocked registers given metric with
|
||||
// the given name. Panics if the given name was
|
||||
// already registered before.
|
||||
func (s *Set) mustRegisterLocked(name string, m metric) {
|
||||
// mustRegisterLocked registers given metric with the given name.
|
||||
//
|
||||
// Panics if the given name was already registered before.
|
||||
func (s *Set) mustRegisterLocked(name string, m metric, isAux bool) {
|
||||
nm, ok := s.m[name]
|
||||
if !ok {
|
||||
nm = &namedMetric{
|
||||
name: name,
|
||||
metric: m,
|
||||
isAux: isAux,
|
||||
}
|
||||
s.m[name] = nm
|
||||
s.a = append(s.a, nm)
|
||||
|
@ -463,8 +466,16 @@ func (s *Set) UnregisterMetric(name string) bool {
|
|||
if !ok {
|
||||
return false
|
||||
}
|
||||
m := nm.metric
|
||||
if nm.isAux {
|
||||
// Do not allow deleting auxiliary metrics such as summary_metric{quantile="..."}
|
||||
// Such metrics must be deleted via the parent metric name, e.g. summary_metric.
|
||||
return false
|
||||
}
|
||||
return s.unregisterMetricLocked(nm)
|
||||
}
|
||||
|
||||
func (s *Set) unregisterMetricLocked(nm *namedMetric) bool {
|
||||
name := nm.name
|
||||
delete(s.m, name)
|
||||
|
||||
deleteFromList := func(metricName string) {
|
||||
|
@ -480,9 +491,9 @@ func (s *Set) UnregisterMetric(name string) bool {
|
|||
// remove metric from s.a
|
||||
deleteFromList(name)
|
||||
|
||||
sm, ok := m.(*Summary)
|
||||
sm, ok := nm.metric.(*Summary)
|
||||
if !ok {
|
||||
// There is no need in cleaning up summary.
|
||||
// There is no need in cleaning up non-summary metrics.
|
||||
return true
|
||||
}
|
||||
|
||||
|
@ -509,13 +520,25 @@ func (s *Set) UnregisterMetric(name string) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// ListMetricNames returns a list of all the metrics in s.
|
||||
// UnregisterAllMetrics de-registers all metrics registered in s.
|
||||
func (s *Set) UnregisterAllMetrics() {
|
||||
metricNames := s.ListMetricNames()
|
||||
for _, name := range metricNames {
|
||||
s.UnregisterMetric(name)
|
||||
}
|
||||
}
|
||||
|
||||
// ListMetricNames returns a sorted list of all the metrics in s.
|
||||
func (s *Set) ListMetricNames() []string {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
var list []string
|
||||
for name := range s.m {
|
||||
list = append(list, name)
|
||||
metricNames := make([]string, 0, len(s.m))
|
||||
for _, nm := range s.m {
|
||||
if nm.isAux {
|
||||
continue
|
||||
}
|
||||
metricNames = append(metricNames, nm.name)
|
||||
}
|
||||
return list
|
||||
sort.Strings(metricNames)
|
||||
return metricNames
|
||||
}
|
||||
|
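A short sketch of how the new isAux behaviour looks from the caller's side, assuming the library's default summary quantiles include 0.5; the metric names are illustrative:

    package main

    import (
        "fmt"

        "github.com/VictoriaMetrics/metrics"
    )

    func main() {
        s := metrics.NewSet()
        s.NewCounter("requests_total")
        s.NewSummary("request_duration_seconds")

        // Auxiliary request_duration_seconds{quantile="..."} series are
        // skipped, so only the parent names are listed (sorted).
        fmt.Println(s.ListMetricNames())

        // Auxiliary quantile series cannot be unregistered directly...
        fmt.Println(s.UnregisterMetric(`request_duration_seconds{quantile="0.5"}`)) // false

        // ...but unregistering the parent summary removes them with it.
        fmt.Println(s.UnregisterMetric("request_duration_seconds")) // true

        s.UnregisterAllMetrics()
    }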
|
24
vendor/github.com/VictoriaMetrics/metrics/summary.go
generated
vendored
|
@ -36,9 +36,9 @@ type Summary struct {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned summary is safe to use from concurrent goroutines.
|
||||
func NewSummary(name string) *Summary {
|
||||
|
@ -51,9 +51,9 @@ func NewSummary(name string) *Summary {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned summary is safe to use from concurrent goroutines.
|
||||
func NewSummaryExt(name string, window time.Duration, quantiles []float64) *Summary {
|
||||
|
@ -140,9 +140,9 @@ func (sm *Summary) updateQuantiles() {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned summary is safe to use from concurrent goroutines.
|
||||
//
|
||||
|
@ -158,9 +158,9 @@ func GetOrCreateSummary(name string) *Summary {
|
|||
// name must be valid Prometheus-compatible metric with possible labels.
|
||||
// For instance,
|
||||
//
|
||||
// * foo
|
||||
// * foo{bar="baz"}
|
||||
// * foo{bar="baz",aaa="b"}
|
||||
// - foo
|
||||
// - foo{bar="baz"}
|
||||
// - foo{bar="baz",aaa="b"}
|
||||
//
|
||||
// The returned summary is safe to use from concurrent goroutines.
|
||||
//
|
||||
|
|
13
vendor/github.com/VictoriaMetrics/metricsql/doc.go
generated
vendored
|
@ -5,11 +5,10 @@
|
|||
//
|
||||
// Usage:
|
||||
//
|
||||
// expr, err := metricsql.Parse(`sum(rate(foo{bar="baz"}[5m])) by (job)`)
|
||||
// if err != nil {
|
||||
// // parse error
|
||||
// }
|
||||
// // Now expr contains parsed MetricsQL as `*Expr` structs.
|
||||
// // See Parse examples for more details.
|
||||
//
|
||||
// expr, err := metricsql.Parse(`sum(rate(foo{bar="baz"}[5m])) by (job)`)
|
||||
// if err != nil {
|
||||
// // parse error
|
||||
// }
|
||||
// // Now expr contains parsed MetricsQL as `*Expr` structs.
|
||||
// // See Parse examples for more details.
|
||||
package metricsql
|
||||
|
|
6
vendor/github.com/VictoriaMetrics/metricsql/optimizer.go
generated
vendored
|
@ -10,9 +10,9 @@ import (
|
|||
//
|
||||
// It performs the following optimizations:
|
||||
//
|
||||
// - Adds missing filters to `foo{filters1} op bar{filters2}`
|
||||
// according to https://utcc.utoronto.ca/~cks/space/blog/sysadmin/PrometheusLabelNonOptimization
|
||||
// I.e. such query is converted to `foo{filters1, filters2} op bar{filters1, filters2}`
|
||||
// - Adds missing filters to `foo{filters1} op bar{filters2}`
|
||||
// according to https://utcc.utoronto.ca/~cks/space/blog/sysadmin/PrometheusLabelNonOptimization
|
||||
// I.e. such query is converted to `foo{filters1, filters2} op bar{filters1, filters2}`
|
||||
func Optimize(e Expr) Expr {
|
||||
if !canOptimize(e) {
|
||||
return e
|
||||
|
|
188
vendor/github.com/VictoriaMetrics/metricsql/transform.go
generated
vendored
|
@ -5,99 +5,101 @@ import (
|
|||
)
|
||||
|
||||
var transformFuncs = map[string]bool{
|
||||
"": true, // empty func is a synonym to union
|
||||
"abs": true,
|
||||
"absent": true,
|
||||
"acos": true,
|
||||
"acosh": true,
|
||||
"asin": true,
|
||||
"asinh": true,
|
||||
"atan": true,
|
||||
"atanh": true,
|
||||
"bitmap_and": true,
|
||||
"bitmap_or": true,
|
||||
"bitmap_xor": true,
|
||||
"buckets_limit": true,
|
||||
"ceil": true,
|
||||
"clamp": true,
|
||||
"clamp_max": true,
|
||||
"clamp_min": true,
|
||||
"cos": true,
|
||||
"cosh": true,
|
||||
"day_of_month": true,
|
||||
"day_of_week": true,
|
||||
"days_in_month": true,
|
||||
"deg": true,
|
||||
"drop_common_labels": true,
|
||||
"end": true,
|
||||
"exp": true,
|
||||
"floor": true,
|
||||
"histogram_avg": true,
|
||||
"histogram_quantile": true,
|
||||
"histogram_quantiles": true,
|
||||
"histogram_share": true,
|
||||
"histogram_stddev": true,
|
||||
"histogram_stdvar": true,
|
||||
"hour": true,
|
||||
"interpolate": true,
|
||||
"keep_last_value": true,
|
||||
"keep_next_value": true,
|
||||
"label_copy": true,
|
||||
"label_del": true,
|
||||
"label_graphite_group": true,
|
||||
"label_join": true,
|
||||
"label_keep": true,
|
||||
"label_lowercase": true,
|
||||
"label_map": true,
|
||||
"label_match": true,
|
||||
"label_mismatch": true,
|
||||
"label_move": true,
|
||||
"label_replace": true,
|
||||
"label_set": true,
|
||||
"label_transform": true,
|
||||
"label_uppercase": true,
|
||||
"label_value": true,
|
||||
"limit_offset": true,
|
||||
"ln": true,
|
||||
"log2": true,
|
||||
"log10": true,
|
||||
"minute": true,
|
||||
"month": true,
|
||||
"now": true,
|
||||
"pi": true,
|
||||
"prometheus_buckets": true,
|
||||
"rad": true,
|
||||
"rand": true,
|
||||
"rand_exponential": true,
|
||||
"rand_normal": true,
|
||||
"range_avg": true,
|
||||
"range_first": true,
|
||||
"range_last": true,
|
||||
"range_max": true,
|
||||
"range_min": true,
|
||||
"range_quantile": true,
|
||||
"range_sum": true,
|
||||
"remove_resets": true,
|
||||
"round": true,
|
||||
"running_avg": true,
|
||||
"running_max": true,
|
||||
"running_min": true,
|
||||
"running_sum": true,
|
||||
"scalar": true,
|
||||
"sgn": true,
|
||||
"sin": true,
|
||||
"sinh": true,
|
||||
"smooth_exponential": true,
|
||||
"sort": true,
|
||||
"sort_by_label": true,
|
||||
"sort_by_label_desc": true,
|
||||
"sort_desc": true,
|
||||
"sqrt": true,
|
||||
"start": true,
|
||||
"step": true,
|
||||
"tan": true,
|
||||
"tanh": true,
|
||||
"time": true,
|
||||
"": true, // empty func is a synonym to union
|
||||
"abs": true,
|
||||
"absent": true,
|
||||
"acos": true,
|
||||
"acosh": true,
|
||||
"asin": true,
|
||||
"asinh": true,
|
||||
"atan": true,
|
||||
"atanh": true,
|
||||
"bitmap_and": true,
|
||||
"bitmap_or": true,
|
||||
"bitmap_xor": true,
|
||||
"buckets_limit": true,
|
||||
"ceil": true,
|
||||
"clamp": true,
|
||||
"clamp_max": true,
|
||||
"clamp_min": true,
|
||||
"cos": true,
|
||||
"cosh": true,
|
||||
"day_of_month": true,
|
||||
"day_of_week": true,
|
||||
"days_in_month": true,
|
||||
"deg": true,
|
||||
"drop_common_labels": true,
|
||||
"end": true,
|
||||
"exp": true,
|
||||
"floor": true,
|
||||
"histogram_avg": true,
|
||||
"histogram_quantile": true,
|
||||
"histogram_quantiles": true,
|
||||
"histogram_share": true,
|
||||
"histogram_stddev": true,
|
||||
"histogram_stdvar": true,
|
||||
"hour": true,
|
||||
"interpolate": true,
|
||||
"keep_last_value": true,
|
||||
"keep_next_value": true,
|
||||
"label_copy": true,
|
||||
"label_del": true,
|
||||
"label_graphite_group": true,
|
||||
"label_join": true,
|
||||
"label_keep": true,
|
||||
"label_lowercase": true,
|
||||
"label_map": true,
|
||||
"label_match": true,
|
||||
"label_mismatch": true,
|
||||
"label_move": true,
|
||||
"label_replace": true,
|
||||
"label_set": true,
|
||||
"label_transform": true,
|
||||
"label_uppercase": true,
|
||||
"label_value": true,
|
||||
"limit_offset": true,
|
||||
"ln": true,
|
||||
"log2": true,
|
||||
"log10": true,
|
||||
"minute": true,
|
||||
"month": true,
|
||||
"now": true,
|
||||
"pi": true,
|
||||
"prometheus_buckets": true,
|
||||
"rad": true,
|
||||
"rand": true,
|
||||
"rand_exponential": true,
|
||||
"rand_normal": true,
|
||||
"range_avg": true,
|
||||
"range_first": true,
|
||||
"range_last": true,
|
||||
"range_max": true,
|
||||
"range_min": true,
|
||||
"range_quantile": true,
|
||||
"range_sum": true,
|
||||
"remove_resets": true,
|
||||
"round": true,
|
||||
"running_avg": true,
|
||||
"running_max": true,
|
||||
"running_min": true,
|
||||
"running_sum": true,
|
||||
"scalar": true,
|
||||
"sgn": true,
|
||||
"sin": true,
|
||||
"sinh": true,
|
||||
"smooth_exponential": true,
|
||||
"sort": true,
|
||||
"sort_by_label": true,
|
||||
"sort_by_label_desc": true,
|
||||
"sort_by_label_numeric": true,
|
||||
"sort_by_label_numeric_desc": true,
|
||||
"sort_desc": true,
|
||||
"sqrt": true,
|
||||
"start": true,
|
||||
"step": true,
|
||||
"tan": true,
|
||||
"tanh": true,
|
||||
"time": true,
|
||||
// "timestamp" has been moved to rollup funcs. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/415
|
||||
"timezone_offset": true,
|
||||
"union": true,
|
||||
|
|
10
vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
generated
vendored
|
@ -31,12 +31,12 @@ func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
|
|||
// allow you to get a list of the partitions in the order the endpoints
|
||||
// will be resolved in.
|
||||
//
|
||||
// resolver, err := endpoints.DecodeModel(reader)
|
||||
// resolver, err := endpoints.DecodeModel(reader)
|
||||
//
|
||||
// partitions := resolver.(endpoints.EnumPartitions).Partitions()
|
||||
// for _, p := range partitions {
|
||||
// // ... inspect partitions
|
||||
// }
|
||||
// partitions := resolver.(endpoints.EnumPartitions).Partitions()
|
||||
// for _, p := range partitions {
|
||||
// // ... inspect partitions
|
||||
// }
|
||||
func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
|
||||
var opts DecodeModelOptions
|
||||
opts.Set(optFns...)
|
||||
|
|
2649
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
File diff suppressed because it is too large
55
vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
generated
vendored
|
@ -9,7 +9,7 @@
|
|||
// AWS GovCloud (US) (aws-us-gov).
|
||||
// .
|
||||
//
|
||||
// Enumerating Regions and Endpoint Metadata
|
||||
// # Enumerating Regions and Endpoint Metadata
|
||||
//
|
||||
// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface
|
||||
// will allow you to get access to the list of underlying Partitions with the
|
||||
|
@ -17,22 +17,22 @@
|
|||
// resolving to a single partition, or enumerate regions, services, and endpoints
|
||||
// in the partition.
|
||||
//
|
||||
// resolver := endpoints.DefaultResolver()
|
||||
// partitions := resolver.(endpoints.EnumPartitions).Partitions()
|
||||
// resolver := endpoints.DefaultResolver()
|
||||
// partitions := resolver.(endpoints.EnumPartitions).Partitions()
|
||||
//
|
||||
// for _, p := range partitions {
|
||||
// fmt.Println("Regions for", p.ID())
|
||||
// for id, _ := range p.Regions() {
|
||||
// fmt.Println("*", id)
|
||||
// }
|
||||
// for _, p := range partitions {
|
||||
// fmt.Println("Regions for", p.ID())
|
||||
// for id, _ := range p.Regions() {
|
||||
// fmt.Println("*", id)
|
||||
// }
|
||||
//
|
||||
// fmt.Println("Services for", p.ID())
|
||||
// for id, _ := range p.Services() {
|
||||
// fmt.Println("*", id)
|
||||
// }
|
||||
// }
|
||||
// fmt.Println("Services for", p.ID())
|
||||
// for id, _ := range p.Services() {
|
||||
// fmt.Println("*", id)
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Using Custom Endpoints
|
||||
// # Using Custom Endpoints
|
||||
//
|
||||
// The endpoints package also gives you the ability to use your own logic how
|
||||
// endpoints are resolved. This is a great way to define a custom endpoint
|
||||
|
@ -47,20 +47,19 @@
|
|||
// of Resolver.EndpointFor, converting it to a type that satisfies the
|
||||
// Resolver interface.
|
||||
//
|
||||
// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
|
||||
// if service == endpoints.S3ServiceID {
|
||||
// return endpoints.ResolvedEndpoint{
|
||||
// URL: "s3.custom.endpoint.com",
|
||||
// SigningRegion: "custom-signing-region",
|
||||
// }, nil
|
||||
// }
|
||||
//
|
||||
// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
|
||||
// if service == endpoints.S3ServiceID {
|
||||
// return endpoints.ResolvedEndpoint{
|
||||
// URL: "s3.custom.endpoint.com",
|
||||
// SigningRegion: "custom-signing-region",
|
||||
// }, nil
|
||||
// }
|
||||
// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
|
||||
// }
|
||||
//
|
||||
// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
|
||||
// }
|
||||
//
|
||||
// sess := session.Must(session.NewSession(&aws.Config{
|
||||
// Region: aws.String("us-west-2"),
|
||||
// EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
|
||||
// }))
|
||||
// sess := session.Must(session.NewSession(&aws.Config{
|
||||
// Region: aws.String("us-west-2"),
|
||||
// EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
|
||||
// }))
|
||||
package endpoints
|
||||
|
|
10
vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
generated
vendored
|
@ -353,10 +353,12 @@ type EnumPartitions interface {
|
|||
// as the second parameter.
|
||||
//
|
||||
// This example shows how to get the regions for DynamoDB in the AWS partition.
|
||||
// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
|
||||
//
|
||||
// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
|
||||
//
|
||||
// This is equivalent to using the partition directly.
|
||||
// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
|
||||
//
|
||||
// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
|
||||
func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
|
||||
for _, p := range ps {
|
||||
if p.ID() != partitionID {
|
||||
|
@ -423,8 +425,8 @@ func (p Partition) ID() string { return p.id }
|
|||
// of new regions and services expansions.
|
||||
//
|
||||
// Errors that can be returned.
|
||||
// * UnknownServiceError
|
||||
// * UnknownEndpointError
|
||||
// - UnknownServiceError
|
||||
// - UnknownEndpointError
|
||||
func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
|
||||
return p.p.EndpointFor(service, region, opts...)
|
||||
}
|
||||
|
|
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
|
@ -5,4 +5,4 @@ package aws
|
|||
const SDKName = "aws-sdk-go"
|
||||
|
||||
// SDKVersion is the version of this SDK
|
||||
const SDKVersion = "1.44.53"
|
||||
const SDKVersion = "1.44.134"
|
||||
|
|
18
vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
generated
vendored
|
@ -1,9 +1,8 @@
|
|||
package shareddefaults
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// SharedCredentialsFilename returns the SDK's default file path
|
||||
|
@ -31,10 +30,17 @@ func SharedConfigFilename() string {
|
|||
// UserHomeDir returns the home directory for the user the process is
|
||||
// running under.
|
||||
func UserHomeDir() string {
|
||||
if runtime.GOOS == "windows" { // Windows
|
||||
return os.Getenv("USERPROFILE")
|
||||
var home string
|
||||
|
||||
home = userHomeDir()
|
||||
if len(home) > 0 {
|
||||
return home
|
||||
}
|
||||
|
||||
// *nix
|
||||
return os.Getenv("HOME")
|
||||
currUser, _ := user.Current()
|
||||
if currUser != nil {
|
||||
home = currUser.HomeDir
|
||||
}
|
||||
|
||||
return home
|
||||
}
|
||||
|
|
18
vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
//go:build !go1.12
|
||||
// +build !go1.12
|
||||
|
||||
package shareddefaults
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
func userHomeDir() string {
|
||||
if runtime.GOOS == "windows" { // Windows
|
||||
return os.Getenv("USERPROFILE")
|
||||
}
|
||||
|
||||
// *nix
|
||||
return os.Getenv("HOME")
|
||||
}
|
13
vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
|||
//go:build go1.12
|
||||
// +build go1.12
|
||||
|
||||
package shareddefaults
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
func userHomeDir() string {
|
||||
home, _ := os.UserHomeDir()
|
||||
return home
|
||||
}
|
19
vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
generated
vendored
|
@ -4,7 +4,6 @@ package jsonutil
|
|||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
|
@ -16,6 +15,12 @@ import (
|
|||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
)
|
||||
|
||||
const (
|
||||
floatNaN = "NaN"
|
||||
floatInf = "Infinity"
|
||||
floatNegInf = "-Infinity"
|
||||
)
|
||||
|
||||
var timeType = reflect.ValueOf(time.Time{}).Type()
|
||||
var byteSliceType = reflect.ValueOf([]byte{}).Type()
|
||||
|
||||
|
@ -211,10 +216,16 @@ func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) erro
|
|||
buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10))
|
||||
case reflect.Float64:
|
||||
f := value.Float()
|
||||
if math.IsInf(f, 0) || math.IsNaN(f) {
|
||||
return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)}
|
||||
switch {
|
||||
case math.IsNaN(f):
|
||||
writeString(floatNaN, buf)
|
||||
case math.IsInf(f, 1):
|
||||
writeString(floatInf, buf)
|
||||
case math.IsInf(f, -1):
|
||||
writeString(floatNegInf, buf)
|
||||
default:
|
||||
buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64))
|
||||
}
|
||||
buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64))
|
||||
default:
|
||||
switch converted := value.Interface().(type) {
|
||||
case time.Time:
|
||||
|
|
13
vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
generated
vendored
|
@ -6,6 +6,7 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
@ -258,6 +259,18 @@ func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag
|
|||
return err
|
||||
}
|
||||
value.Set(reflect.ValueOf(v))
|
||||
case *float64:
|
||||
// These are regular strings when parsed by encoding/json's unmarshaler.
|
||||
switch {
|
||||
case strings.EqualFold(d, floatNaN):
|
||||
value.Set(reflect.ValueOf(aws.Float64(math.NaN())))
|
||||
case strings.EqualFold(d, floatInf):
|
||||
value.Set(reflect.ValueOf(aws.Float64(math.Inf(1))))
|
||||
case strings.EqualFold(d, floatNegInf):
|
||||
value.Set(reflect.ValueOf(aws.Float64(math.Inf(-1))))
|
||||
default:
|
||||
return fmt.Errorf("unknown JSON number value: %s", d)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
|
||||
}
|
||||
|
|
34
vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
generated
vendored
|
@ -3,6 +3,7 @@ package queryutil
|
|||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
@ -13,6 +14,12 @@ import (
|
|||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
)
|
||||
|
||||
const (
|
||||
floatNaN = "NaN"
|
||||
floatInf = "Infinity"
|
||||
floatNegInf = "-Infinity"
|
||||
)
|
||||
|
||||
// Parse parses an object i and fills a url.Values object. The isEC2 flag
|
||||
// indicates if this is the EC2 Query sub-protocol.
|
||||
func Parse(body url.Values, i interface{}, isEC2 bool) error {
|
||||
|
@ -228,9 +235,32 @@ func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, ta
|
|||
case int:
|
||||
v.Set(name, strconv.Itoa(value))
|
||||
case float64:
|
||||
v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
|
||||
var str string
|
||||
switch {
|
||||
case math.IsNaN(value):
|
||||
str = floatNaN
|
||||
case math.IsInf(value, 1):
|
||||
str = floatInf
|
||||
case math.IsInf(value, -1):
|
||||
str = floatNegInf
|
||||
default:
|
||||
str = strconv.FormatFloat(value, 'f', -1, 64)
|
||||
}
|
||||
v.Set(name, str)
|
||||
case float32:
|
||||
v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
|
||||
asFloat64 := float64(value)
|
||||
var str string
|
||||
switch {
|
||||
case math.IsNaN(asFloat64):
|
||||
str = floatNaN
|
||||
case math.IsInf(asFloat64, 1):
|
||||
str = floatInf
|
||||
case math.IsInf(asFloat64, -1):
|
||||
str = floatNegInf
|
||||
default:
|
||||
str = strconv.FormatFloat(asFloat64, 'f', -1, 32)
|
||||
}
|
||||
v.Set(name, str)
|
||||
case time.Time:
|
||||
const ISO8601UTC = "2006-01-02T15:04:05Z"
|
||||
format := tag.Get("timestampFormat")
|
||||
|
|
3
vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
generated
vendored
|
@ -3,6 +3,7 @@ package query
|
|||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
|
@ -62,7 +63,7 @@ func UnmarshalError(r *request.Request) {
|
|||
}
|
||||
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New(respErr.Code, respErr.Message, nil),
|
||||
awserr.New(strings.TrimSpace(respErr.Code), strings.TrimSpace(respErr.Message), nil),
|
||||
r.HTTPResponse.StatusCode,
|
||||
reqID,
|
||||
)
|
||||
|
|
18
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
generated
vendored
|
@ -6,6 +6,7 @@ import (
|
|||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
|
@ -20,6 +21,12 @@ import (
|
|||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
)
|
||||
|
||||
const (
|
||||
floatNaN = "NaN"
|
||||
floatInf = "Infinity"
|
||||
floatNegInf = "-Infinity"
|
||||
)
|
||||
|
||||
// Whether the byte value can be sent without escaping in AWS URLs
|
||||
var noEscape [256]bool
|
||||
|
||||
|
@ -302,7 +309,16 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error)
|
|||
case int64:
|
||||
str = strconv.FormatInt(value, 10)
|
||||
case float64:
|
||||
str = strconv.FormatFloat(value, 'f', -1, 64)
|
||||
switch {
|
||||
case math.IsNaN(value):
|
||||
str = floatNaN
|
||||
case math.IsInf(value, 1):
|
||||
str = floatInf
|
||||
case math.IsInf(value, -1):
|
||||
str = floatNegInf
|
||||
default:
|
||||
str = strconv.FormatFloat(value, 'f', -1, 64)
|
||||
}
|
||||
case time.Time:
|
||||
format := tag.Get("timestampFormat")
|
||||
if len(format) == 0 {
|
||||
|
|
18
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
generated
vendored
|
@ -6,6 +6,7 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
@ -231,9 +232,20 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro
|
|||
}
|
||||
v.Set(reflect.ValueOf(&i))
|
||||
case *float64:
|
||||
f, err := strconv.ParseFloat(header, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
var f float64
|
||||
switch {
|
||||
case strings.EqualFold(header, floatNaN):
|
||||
f = math.NaN()
|
||||
case strings.EqualFold(header, floatInf):
|
||||
f = math.Inf(1)
|
||||
case strings.EqualFold(header, floatNegInf):
|
||||
f = math.Inf(-1)
|
||||
default:
|
||||
var err error
|
||||
f, err = strconv.ParseFloat(header, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
v.Set(reflect.ValueOf(&f))
|
||||
case *time.Time:
|
||||
|
|
32
vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
generated
vendored
|
@ -5,6 +5,7 @@ import (
|
|||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
@ -14,6 +15,12 @@ import (
|
|||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
)
|
||||
|
||||
const (
|
||||
floatNaN = "NaN"
|
||||
floatInf = "Infinity"
|
||||
floatNegInf = "-Infinity"
|
||||
)
|
||||
|
||||
// BuildXML will serialize params into an xml.Encoder. Error will be returned
|
||||
// if the serialization of any of the params or nested values fails.
|
||||
func BuildXML(params interface{}, e *xml.Encoder) error {
|
||||
|
@ -275,6 +282,7 @@ func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect
|
|||
// Error will be returned if the value type is unsupported.
|
||||
func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
|
||||
var str string
|
||||
|
||||
switch converted := value.Interface().(type) {
|
||||
case string:
|
||||
str = converted
|
||||
|
@ -289,9 +297,29 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl
|
|||
case int:
|
||||
str = strconv.Itoa(converted)
|
||||
case float64:
|
||||
str = strconv.FormatFloat(converted, 'f', -1, 64)
|
||||
switch {
|
||||
case math.IsNaN(converted):
|
||||
str = floatNaN
|
||||
case math.IsInf(converted, 1):
|
||||
str = floatInf
|
||||
case math.IsInf(converted, -1):
|
||||
str = floatNegInf
|
||||
default:
|
||||
str = strconv.FormatFloat(converted, 'f', -1, 64)
|
||||
}
|
||||
case float32:
|
||||
str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
|
||||
// The SDK doesn't render float32 values in types, only float64. This case would never be hit currently.
|
||||
asFloat64 := float64(converted)
|
||||
switch {
|
||||
case math.IsNaN(asFloat64):
|
||||
str = floatNaN
|
||||
case math.IsInf(asFloat64, 1):
|
||||
str = floatInf
|
||||
case math.IsInf(asFloat64, -1):
|
||||
str = floatNegInf
|
||||
default:
|
||||
str = strconv.FormatFloat(asFloat64, 'f', -1, 32)
|
||||
}
|
||||
case time.Time:
|
||||
format := tag.Get("timestampFormat")
|
||||
if len(format) == 0 {
|
||||
|
|
18
vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
generated
vendored
|
@ -6,6 +6,7 @@ import (
|
|||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -276,9 +277,20 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
|
|||
}
|
||||
r.Set(reflect.ValueOf(&v))
|
||||
case *float64:
|
||||
v, err := strconv.ParseFloat(node.Text, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
var v float64
|
||||
switch {
|
||||
case strings.EqualFold(node.Text, floatNaN):
|
||||
v = math.NaN()
|
||||
case strings.EqualFold(node.Text, floatInf):
|
||||
v = math.Inf(1)
|
||||
case strings.EqualFold(node.Text, floatNegInf):
|
||||
v = math.Inf(-1)
|
||||
default:
|
||||
var err error
|
||||
v, err = strconv.ParseFloat(node.Text, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
r.Set(reflect.ValueOf(&v))
|
||||
case *time.Time:
|
||||
|
|
3086
vendor/github.com/aws/aws-sdk-go/service/s3/api.go
generated
vendored
File diff suppressed because it is too large
22
vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
generated
vendored
|
@ -39,11 +39,11 @@ func NormalizeBucketLocation(loc string) string {
|
|||
// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
|
||||
// for more information on the values that can be returned.
|
||||
//
|
||||
// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{
|
||||
// Bucket: aws.String(bucket),
|
||||
// })
|
||||
// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
|
||||
// err := req.Send()
|
||||
// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{
|
||||
// Bucket: aws.String(bucket),
|
||||
// })
|
||||
// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
|
||||
// err := req.Send()
|
||||
var NormalizeBucketLocationHandler = request.NamedHandler{
|
||||
Name: "awssdk.s3.NormalizeBucketLocation",
|
||||
Fn: func(req *request.Request) {
|
||||
|
@ -65,12 +65,12 @@ var NormalizeBucketLocationHandler = request.NamedHandler{
|
|||
// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
|
||||
// for more information on the values that can be returned.
|
||||
//
|
||||
// result, err := svc.GetBucketLocationWithContext(ctx,
|
||||
// &s3.GetBucketLocationInput{
|
||||
// Bucket: aws.String(bucket),
|
||||
// },
|
||||
// s3.WithNormalizeBucketLocation,
|
||||
// )
|
||||
// result, err := svc.GetBucketLocationWithContext(ctx,
|
||||
// &s3.GetBucketLocationInput{
|
||||
// Bucket: aws.String(bucket),
|
||||
// },
|
||||
// s3.WithNormalizeBucketLocation,
|
||||
// )
|
||||
func WithNormalizeBucketLocation(r *request.Request) {
|
||||
r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
|
||||
}
|
||||
|
|
2
vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
generated
vendored
|
@ -8,7 +8,7 @@
|
|||
// See s3 package documentation for more information.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
|
||||
//
|
||||
// Using the Client
|
||||
// # Using the Client
|
||||
//
|
||||
// To contact Amazon Simple Storage Service with the SDK use the New function to create
|
||||
// a new service client. With that client you can make API requests to the service.
|
||||
|
|
115
vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
generated
vendored
|
@ -6,99 +6,99 @@
|
|||
// for optimizations if the Body satisfies that type. Once the Uploader instance
|
||||
// is created you can call Upload concurrently from multiple goroutines safely.
|
||||
//
|
||||
// // The session the S3 Uploader will use
|
||||
// sess := session.Must(session.NewSession())
|
||||
// // The session the S3 Uploader will use
|
||||
// sess := session.Must(session.NewSession())
|
||||
//
|
||||
// // Create an uploader with the session and default options
|
||||
// uploader := s3manager.NewUploader(sess)
|
||||
// // Create an uploader with the session and default options
|
||||
// uploader := s3manager.NewUploader(sess)
|
||||
//
|
||||
// f, err := os.Open(filename)
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("failed to open file %q, %v", filename, err)
|
||||
// }
|
||||
// f, err := os.Open(filename)
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("failed to open file %q, %v", filename, err)
|
||||
// }
|
||||
//
|
||||
// // Upload the file to S3.
|
||||
// result, err := uploader.Upload(&s3manager.UploadInput{
|
||||
// Bucket: aws.String(myBucket),
|
||||
// Key: aws.String(myString),
|
||||
// Body: f,
|
||||
// })
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("failed to upload file, %v", err)
|
||||
// }
|
||||
// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
|
||||
// // Upload the file to S3.
|
||||
// result, err := uploader.Upload(&s3manager.UploadInput{
|
||||
// Bucket: aws.String(myBucket),
|
||||
// Key: aws.String(myString),
|
||||
// Body: f,
|
||||
// })
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("failed to upload file, %v", err)
|
||||
// }
|
||||
// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
|
||||
//
|
||||
// See the s3manager package's Uploader type documentation for more information.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
|
||||
//
|
||||
// Download Manager
|
||||
// # Download Manager
|
||||
//
|
||||
// The s3manager package's Downloader provides concurrently downloading of Objects
|
||||
// from S3. The Downloader will write S3 Object content with an io.WriterAt.
|
||||
// Once the Downloader instance is created you can call Download concurrently from
|
||||
// multiple goroutines safely.
|
||||
//
|
||||
// // The session the S3 Downloader will use
|
||||
// sess := session.Must(session.NewSession())
|
||||
// // The session the S3 Downloader will use
|
||||
// sess := session.Must(session.NewSession())
|
||||
//
|
||||
// // Create a downloader with the session and default options
|
||||
// downloader := s3manager.NewDownloader(sess)
|
||||
// // Create a downloader with the session and default options
|
||||
// downloader := s3manager.NewDownloader(sess)
|
||||
//
|
||||
// // Create a file to write the S3 Object contents to.
|
||||
// f, err := os.Create(filename)
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("failed to create file %q, %v", filename, err)
|
||||
// }
|
||||
// // Create a file to write the S3 Object contents to.
|
||||
// f, err := os.Create(filename)
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("failed to create file %q, %v", filename, err)
|
||||
// }
|
||||
//
|
||||
// // Write the contents of S3 Object to the file
|
||||
// n, err := downloader.Download(f, &s3.GetObjectInput{
|
||||
// Bucket: aws.String(myBucket),
|
||||
// Key: aws.String(myString),
|
||||
// })
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("failed to download file, %v", err)
|
||||
// }
|
||||
// fmt.Printf("file downloaded, %d bytes\n", n)
|
||||
// // Write the contents of S3 Object to the file
|
||||
// n, err := downloader.Download(f, &s3.GetObjectInput{
|
||||
// Bucket: aws.String(myBucket),
|
||||
// Key: aws.String(myString),
|
||||
// })
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("failed to download file, %v", err)
|
||||
// }
|
||||
// fmt.Printf("file downloaded, %d bytes\n", n)
|
||||
//
|
||||
// See the s3manager package's Downloader type documentation for more information.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
|
||||
//
|
||||
// Automatic URI cleaning
|
||||
// # Automatic URI cleaning
|
||||
//
|
||||
// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname)
|
||||
// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct
|
||||
// used by the service client.
|
||||
//
|
||||
// svc := s3.New(sess, &aws.Config{
|
||||
// DisableRestProtocolURICleaning: aws.Bool(true),
|
||||
// })
|
||||
// out, err := svc.GetObject(&s3.GetObjectInput {
|
||||
// Bucket: aws.String("bucketname"),
|
||||
// Key: aws.String("//foo//bar//moo"),
|
||||
// })
|
||||
// svc := s3.New(sess, &aws.Config{
|
||||
// DisableRestProtocolURICleaning: aws.Bool(true),
|
||||
// })
|
||||
// out, err := svc.GetObject(&s3.GetObjectInput {
|
||||
// Bucket: aws.String("bucketname"),
|
||||
// Key: aws.String("//foo//bar//moo"),
|
||||
// })
|
||||
//
|
||||
// Get Bucket Region
|
||||
// # Get Bucket Region
|
||||
//
|
||||
// GetBucketRegion will attempt to get the region for a bucket using a region
|
||||
// hint to determine which AWS partition to perform the query on. Use this utility
|
||||
// to determine the region a bucket is in.
|
||||
//
|
||||
// sess := session.Must(session.NewSession())
|
||||
// sess := session.Must(session.NewSession())
|
||||
//
|
||||
// bucket := "my-bucket"
|
||||
// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
|
||||
// if err != nil {
|
||||
// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
|
||||
// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region not found\n", bucket)
|
||||
// }
|
||||
// return err
|
||||
// }
|
||||
// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
|
||||
// bucket := "my-bucket"
|
||||
// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
|
||||
// if err != nil {
|
||||
// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
|
||||
// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region not found\n", bucket)
|
||||
// }
|
||||
// return err
|
||||
// }
|
||||
// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
|
||||
//
|
||||
// See the s3manager package's GetBucketRegion function documentation for more information
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
|
||||
//
|
||||
// S3 Crypto Client
|
||||
// # S3 Crypto Client
|
||||
//
|
||||
// The s3crypto package provides the tools to upload and download encrypted
|
||||
// content from S3. The Encryption and Decryption clients can be used concurrently
|
||||
|
@ -106,5 +106,4 @@
|
|||
//
|
||||
// See the s3crypto package documentation for more information.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
|
||||
//
|
||||
package s3
|
||||
|
|
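The doc_custom.go comments above describe the s3manager upload flow in fragments. As a quick orientation (an editorial sketch, not part of the diff), here is a minimal, self-contained Go program for that flow; the bucket, key, and file name are placeholder values.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	// Placeholder names for this sketch.
	bucket, key, filename := "my-bucket", "my-key", "/tmp/example.txt"

	// The session the S3 Uploader will use.
	sess := session.Must(session.NewSession())

	// Create an uploader with the session and default options.
	uploader := s3manager.NewUploader(sess)

	f, err := os.Open(filename)
	if err != nil {
		log.Fatalf("failed to open file %q, %v", filename, err)
	}
	defer f.Close()

	// Upload the file; large bodies are split into parts and uploaded concurrently.
	result, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		Body:   f,
	})
	if err != nil {
		log.Fatalf("failed to upload file, %v", err)
	}
	fmt.Printf("file uploaded to %s\n", aws.StringValue(result.Location))
}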
5	vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go generated vendored

@@ -52,9 +52,8 @@ func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) {
// outpost access-point resource.
//
// Supported Outpost AccessPoint ARN format:
//   - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName}
//   - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint
-//
func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) {
	// outpost accesspoint arn is only valid if service is s3-outposts
	if a.Service != "s3-outposts" {
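The hunk above documents the Outpost access point ARN layout that parseOutpostAccessPointResource expects. As an aside (an editorial sketch, not part of the diff), the SDK's aws/arn package shows how that example ARN decomposes; the constant is copied from the doc comment.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/arn"
)

func main() {
	// Example Outpost access point ARN from the doc comment above.
	const s = "arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint"

	a, err := arn.Parse(s)
	if err != nil {
		log.Fatalf("invalid ARN: %v", err)
	}

	// The resource parser above only accepts the "s3-outposts" service; the
	// resource part carries the outpost ID and access point name.
	fmt.Println("service: ", a.Service)   // s3-outposts
	fmt.Println("region:  ", a.Region)    // us-west-2
	fmt.Println("account: ", a.AccountID) // 012345678901
	fmt.Println("resource:", a.Resource)  // outpost/op-.../accesspoint/myaccesspoint
}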
3	vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go generated vendored

@@ -37,7 +37,6 @@ type accessPointEndpointBuilder arn.AccessPointARN
//   - example : myaccesspoint-012345678901.s3-accesspoint.us-west-2.amazonaws.com
//
// Access Point Endpoint requests are signed using "s3" as signing name.
-//
func (a accessPointEndpointBuilder) build(req *request.Request) error {
	resolveService := arn.AccessPointARN(a).Service
	resolveRegion := arn.AccessPointARN(a).Region

@@ -92,7 +91,6 @@ type s3ObjectLambdaAccessPointEndpointBuilder arn.S3ObjectLambdaAccessPointARN
//   - example : myaccesspoint-012345678901.s3-object-lambda.us-west-2.amazonaws.com
//
// Access Point Endpoint requests are signed using "s3-object-lambda" as signing name.
-//
func (a s3ObjectLambdaAccessPointEndpointBuilder) build(req *request.Request) error {
	resolveRegion := arn.S3ObjectLambdaAccessPointARN(a).Region

@@ -147,7 +145,6 @@ type outpostAccessPointEndpointBuilder arn.OutpostAccessPointARN
//   - example : myaccesspoint-012345678901.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com
//
// Outpost AccessPoint Endpoint request are signed using "s3-outposts" as signing name.
-//
func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error {
	resolveRegion := o.Region
	resolveService := o.Service
46	vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go generated vendored

@@ -23,37 +23,37 @@
// can be stubbed out for unit testing your code with the SDK without needing
// to inject custom request handlers into the SDK's request pipeline.
//
//    // myFunc uses an SDK service client to make a request to
//    // Amazon Simple Storage Service.
//    func myFunc(svc s3iface.S3API) bool {
//        // Make svc.AbortMultipartUpload request
//    }
//
//    func main() {
//        sess := session.New()
//        svc := s3.New(sess)
//
//        myFunc(svc)
//    }
//
// In your _test.go file:
//
//    // Define a mock struct to be used in your unit tests of myFunc.
//    type mockS3Client struct {
//        s3iface.S3API
//    }
//    func (m *mockS3Client) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) {
//        // mock response/functionality
//    }
//
//    func TestMyFunc(t *testing.T) {
//        // Setup Test
//        mockSvc := &mockS3Client{}
//
//        myfunc(mockSvc)
//
//        // Verify myFunc's functionality
//    }
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
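The s3iface doc comment above outlines the mocking pattern. A compact, runnable version of that pattern is sketched below (an editorial illustration, not part of the diff); myFunc and mockS3Client are hypothetical names taken from the comment.

package mypkg

import (
	"testing"

	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// myFunc is a hypothetical function under test; it only depends on the parts
// of the S3 API it actually calls.
func myFunc(svc s3iface.S3API) error {
	_, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{})
	return err
}

// mockS3Client embeds the interface so only the methods myFunc uses need to
// be stubbed out.
type mockS3Client struct {
	s3iface.S3API
}

func (m *mockS3Client) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) {
	// Mocked response: succeed without talking to S3.
	return &s3.AbortMultipartUploadOutput{}, nil
}

func TestMyFunc(t *testing.T) {
	mockSvc := &mockS3Client{}
	if err := myFunc(mockSvc); err != nil {
		t.Fatalf("myFunc returned error: %v", err)
	}
}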
3	vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go generated vendored

@@ -112,6 +112,7 @@ type BatchDeleteIterator interface {
// iterate through a list of objects and delete the objects.
//
// Example:
+//
//    iter := &s3manager.DeleteListIterator{
//        Client: svc,
//        Input: &s3.ListObjectsInput{

@@ -203,6 +204,7 @@ type BatchDelete struct {
// objects.
//
// Example:
+//
//    batcher := s3manager.NewBatchDeleteWithClient(client, size)
//
//    objects := []BatchDeleteObject{

@@ -236,6 +238,7 @@ func NewBatchDeleteWithClient(client s3iface.S3API, options ...func(*BatchDelete
// objects.
//
// Example:
+//
//    batcher := s3manager.NewBatchDelete(sess, size)
//
//    objects := []BatchDeleteObject{
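The batch.go hunks above show fragments of the BatchDelete examples. The following is a minimal, self-contained sketch of a batch delete using the []BatchDeleteObject form those comments reference (an editorial illustration, not part of the diff; bucket and key names are placeholders).

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Objects to remove; bucket and keys are placeholders.
	objects := []s3manager.BatchDeleteObject{
		{Object: &s3.DeleteObjectInput{Bucket: aws.String("my-bucket"), Key: aws.String("a.txt")}},
		{Object: &s3.DeleteObjectInput{Bucket: aws.String("my-bucket"), Key: aws.String("b.txt")}},
	}

	// Delete the listed objects in batches using the S3 client above.
	batcher := s3manager.NewBatchDeleteWithClient(svc)
	if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{Objects: objects}); err != nil {
		log.Fatalf("batch delete failed: %v", err)
	}
}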
54	vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go generated vendored

@@ -24,30 +24,30 @@
// For example to get the region of a bucket which exists in "eu-central-1"
// you could provide a region hint of "us-west-2".
//
//    sess := session.Must(session.NewSession())
//
//    bucket := "my-bucket"
//    region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
//    if err != nil {
//        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
//            fmt.Fprintf(os.Stderr, "unable to find bucket %s's region not found\n", bucket)
//        }
//        return err
//    }
//    fmt.Printf("Bucket %s is in %s region\n", bucket, region)
//
// By default the request will be made to the Amazon S3 endpoint using the Path
// style addressing.
//
//    s3.us-west-2.amazonaws.com/bucketname
//
// This is not compatible with Amazon S3's FIPS endpoints. To override this
// behavior to use Virtual Host style addressing, provide a functional option
// that will set the Request's Config.S3ForcePathStyle to aws.Bool(false).
//
//    region, err := s3manager.GetBucketRegion(ctx, sess, "bucketname", "us-west-2", func(r *request.Request) {
//        r.S3ForcePathStyle = aws.Bool(false)
//    })
//
// To configure the GetBucketRegion to make a request via the Amazon
// S3 FIPS endpoints directly when a FIPS region name is not available, (e.g.

@@ -55,11 +55,11 @@
// utility is called with. The hint region will be ignored if an endpoint URL
// is configured on the session or client.
//
//    sess, err := session.NewSession(&aws.Config{
//        Endpoint: aws.String("https://s3-fips.us-west-2.amazonaws.com"),
//    })
//
//    region, err := s3manager.GetBucketRegion(context.Background(), sess, "bucketname", "")
func GetBucketRegion(ctx aws.Context, c client.ConfigProvider, bucket, regionHint string, opts ...request.Option) (string, error) {
	var cfg aws.Config
	if len(regionHint) != 0 {

@@ -78,15 +78,15 @@ const bucketRegionHeader = "X-Amz-Bucket-Region"
// By default the request will be made to the Amazon S3 endpoint using the Path
// style addressing.
//
//    s3.us-west-2.amazonaws.com/bucketname
//
// This is not compatible with Amazon S3's FIPS endpoints. To override this
// behavior to use Virtual Host style addressing, provide a functional option
// that will set the Request's Config.S3ForcePathStyle to aws.Bool(false).
//
//    region, err := s3manager.GetBucketRegionWithClient(ctx, client, "bucketname", func(r *request.Request) {
//        r.S3ForcePathStyle = aws.Bool(false)
//    })
//
// To configure the GetBucketRegion to make a request via the Amazon
// S3 FIPS endpoints directly when a FIPS region name is not available, (e.g.

@@ -94,11 +94,11 @@ const bucketRegionHeader = "X-Amz-Bucket-Region"
// utility is called with. The hint region will be ignored if an endpoint URL
// is configured on the session or client.
//
//    region, err := s3manager.GetBucketRegionWithClient(context.Background(),
//        s3.New(sess, &aws.Config{
//            Endpoint: aws.String("https://s3-fips.us-west-2.amazonaws.com"),
//        }),
//        "bucketname")
//
// See GetBucketRegion for more information.
func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string, opts ...request.Option) (string, error) {
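The bucket_region.go comments above explain the region-hint behavior of GetBucketRegion. Below is a minimal runnable sketch of that call (an editorial illustration, not part of the diff; the bucket name is a placeholder and the hint only selects which AWS partition is queried).

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession())

	bucket := "my-bucket" // placeholder bucket name
	// "us-west-2" is only a hint; the response reports the bucket's real region.
	region, err := s3manager.GetBucketRegion(context.Background(), sess, bucket, "us-west-2")
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
			fmt.Fprintf(os.Stderr, "bucket %s not found\n", bucket)
		}
		log.Fatal(err)
	}
	fmt.Printf("Bucket %s is in %s region\n", bucket, region)
}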
45	vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go generated vendored

@@ -86,16 +86,17 @@ func WithDownloaderRequestOptions(opts ...request.Option) func(*Downloader) {
// interface.
//
// Example:
//
//    // The session the S3 Downloader will use
//    sess := session.Must(session.NewSession())
//
//    // Create a downloader with the session and default options
//    downloader := s3manager.NewDownloader(sess)
//
//    // Create a downloader with the session and custom options
//    downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
//        d.PartSize = 64 * 1024 * 1024 // 64MB per part
//    })
func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {
	return newDownloader(s3.New(c), options...)
}

@@ -120,19 +121,20 @@ func newDownloader(client s3iface.S3API, options ...func(*Downloader)) *Download
// to make S3 API calls.
//
// Example:
//
//    // The session the S3 Downloader will use
//    sess := session.Must(session.NewSession())
//
//    // The S3 client the S3 Downloader will use
//    s3Svc := s3.New(sess)
//
//    // Create a downloader with the s3 client and default options
//    downloader := s3manager.NewDownloaderWithClient(s3Svc)
//
//    // Create a downloader with the s3 client and custom options
//    downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) {
//        d.PartSize = 64 * 1024 * 1024 // 64MB per part
//    })
func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {
	return newDownloader(svc, options...)
}

@@ -223,6 +225,7 @@ func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s
// to the io.WriterAt specificed in the iterator.
//
// Example:
//
//    svc := s3manager.NewDownloader(session)
//
//    fooFile, err := os.Open("/tmp/foo.file")

@@ -464,7 +467,11 @@ func (d *downloader) tryDownloadChunk(in *s3.GetObjectInput, w io.Writer) (int64
	}
	d.setTotalBytes(resp) // Set total if not yet set.

-	n, err := io.Copy(w, resp.Body)
+	var src io.Reader = resp.Body
+	if d.cfg.BufferProvider != nil {
+		src = &suppressWriterAt{suppressed: src}
+	}
+	n, err := io.Copy(w, src)
	resp.Body.Close()
	if err != nil {
		return n, &errReadingBody{err: err}
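The tryDownloadChunk change above routes the response body through a Read-only wrapper whenever a BufferProvider is configured. The sketch below is an editorial illustration of the general io.Copy fast-path behavior this kind of wrapper guards against: io.Copy prefers the source's WriteTo method, which would bypass the destination's pooled-buffer ReadFrom unless the source is reduced to a plain Reader. All helper names here are invented for the example.

package main

import (
	"fmt"
	"io"
	"strings"
)

// readFromCounter stands in for a writer whose ReadFrom fast path we want
// io.Copy to use (in the SDK, the pooled-buffer writer from writer_read_from.go).
type readFromCounter struct {
	used bool
}

func (w *readFromCounter) Write(p []byte) (int, error) { return len(p), nil }

func (w *readFromCounter) ReadFrom(r io.Reader) (int64, error) {
	w.used = true
	// Copy through a bare io.Writer wrapper so this ReadFrom is not re-entered.
	return io.Copy(struct{ io.Writer }{w}, r)
}

// readOnly mirrors the suppressWriterAt idea: expose only Read so io.Copy
// cannot take a fast path on the source and must use the writer's ReadFrom.
type readOnly struct{ r io.Reader }

func (s readOnly) Read(p []byte) (int, error) { return s.r.Read(p) }

func main() {
	dst := &readFromCounter{}
	io.Copy(dst, strings.NewReader("hello")) // strings.Reader has WriteTo, so dst.ReadFrom is skipped
	fmt.Println("ReadFrom used without wrapper:", dst.used) // false

	dst2 := &readFromCounter{}
	io.Copy(dst2, readOnly{r: strings.NewReader("hello")}) // only Read is visible, so dst2.ReadFrom runs
	fmt.Println("ReadFrom used with wrapper:   ", dst2.used) // true
}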
91	vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go generated vendored

@@ -40,18 +40,17 @@ const DefaultUploadConcurrency = 5
//
// Example:
//
//    u := s3manager.NewUploader(opts)
//    output, err := u.upload(input)
//    if err != nil {
//        if multierr, ok := err.(s3manager.MultiUploadFailure); ok {
//            // Process error and its associated uploadID
//            fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
//        } else {
//            // Process error generically
//            fmt.Println("Error:", err.Error())
//        }
//    }
type MultiUploadFailure interface {
	awserr.Error

@@ -77,7 +76,7 @@ type multiUploadError struct {
// Error returns the string representation of the error.
//
-// See apierr.BaseError ErrorWithExtra for output format
+// # See apierr.BaseError ErrorWithExtra for output format
//
// Satisfies the error interface.
func (m multiUploadError) Error() string {

@@ -187,16 +186,17 @@ type Uploader struct {
// satisfies the client.ConfigProvider interface.
//
// Example:
//
//    // The session the S3 Uploader will use
//    sess := session.Must(session.NewSession())
//
//    // Create an uploader with the session and default options
//    uploader := s3manager.NewUploader(sess)
//
//    // Create an uploader with the session and custom options
//    uploader := s3manager.NewUploader(session, func(u *s3manager.Uploader) {
//        u.PartSize = 64 * 1024 * 1024 // 64MB per part
//    })
func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
	return newUploader(s3.New(c), options...)
}

@@ -225,19 +225,20 @@ func newUploader(client s3iface.S3API, options ...func(*Uploader)) *Uploader {
// a S3 service client to make S3 API calls.
//
// Example:
//
//    // The session the S3 Uploader will use
//    sess := session.Must(session.NewSession())
//
//    // S3 service client the Upload manager will use.
//    s3Svc := s3.New(sess)
//
//    // Create an uploader with S3 client and default options
//    uploader := s3manager.NewUploaderWithClient(s3Svc)
//
//    // Create an uploader with S3 client and custom options
//    uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) {
//        u.PartSize = 64 * 1024 * 1024 // 64MB per part
//    })
func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
	return newUploader(svc, options...)
}

@@ -256,21 +257,22 @@ func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploa
// It is safe to call this method concurrently across goroutines.
//
// Example:
//
//    // Upload input parameters
//    upParams := &s3manager.UploadInput{
//        Bucket: &bucketName,
//        Key:    &keyName,
//        Body:   file,
//    }
//
//    // Perform an upload.
//    result, err := uploader.Upload(upParams)
//
//    // Perform upload with options different than the those in the Uploader.
//    result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) {
//        u.PartSize = 10 * 1024 * 1024 // 10MB part size
//        u.LeavePartsOnError = true    // Don't delete the parts if the upload fails.
//    })
func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) {
	return u.UploadWithContext(aws.BackgroundContext(), input, options...)
}

@@ -310,6 +312,7 @@ func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ..
// allows for custom defined functionality.
//
// Example:
//
//    svc:= s3manager.NewUploader(sess)
//
//    objects := []BatchUploadObject{
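The upload.go comments above mention the MultiUploadFailure interface for recovering the multipart upload ID after a failed upload. A minimal sketch of that error handling follows (an editorial illustration, not part of the diff; bucket and key names are placeholders).

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession())
	uploader := s3manager.NewUploader(sess)

	_, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String("my-bucket"), // placeholder names
		Key:    aws.String("my-key"),
		Body:   strings.NewReader("example body"),
	})
	if err != nil {
		// A failed multipart upload exposes the upload ID so the caller can
		// abort or resume the multipart upload later.
		if multierr, ok := err.(s3manager.MultiUploadFailure); ok {
			fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
		} else {
			fmt.Println("Error:", err.Error())
		}
		log.Fatal("upload failed")
	}
}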
8	vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go generated vendored

@@ -73,3 +73,11 @@ func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r Writer
	}
	return r, cleanup
}
+
+type suppressWriterAt struct {
+	suppressed io.Reader
+}
+
+func (s *suppressWriterAt) Read(p []byte) (n int, err error) {
+	return s.suppressed.Read(p)
+}
11	vendor/github.com/aws/aws-sdk-go/service/s3/service.go generated vendored

@@ -39,13 +39,14 @@ const (
// aws.Config parameter to add your extra config.
//
// Example:
//
//    mySession := session.Must(session.NewSession())
//
//    // Create a S3 client from just a session.
//    svc := s3.New(mySession)
//
//    // Create a S3 client with additional configuration
//    svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
	c := p.ClientConfig(EndpointsID, cfgs...)
	if c.SigningNameDerived || len(c.SigningName) == 0 {
201	vendor/github.com/aws/aws-sdk-go/service/sso/api.go generated vendored

@@ -29,14 +29,13 @@ const opGetRoleCredentials = "GetRoleCredentials"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//    // Example sending a request using the GetRoleCredentialsRequest method.
//    req, resp := client.GetRoleCredentialsRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials
func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *request.Request, output *GetRoleCredentialsOutput) {

@@ -69,20 +68,21 @@ func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *re
// API operation GetRoleCredentials for usage and error information.
//
// Returned Error Types:
//
//   - InvalidRequestException
//     Indicates that a problem occurred with the input to the request. For example,
//     a required parameter might be missing or out of range.
//
//   - UnauthorizedException
//     Indicates that the request is not authorized. This can happen due to an invalid
//     access token in the request.
//
//   - TooManyRequestsException
//     Indicates that the request is being made too frequently and is more than
//     what the server can handle.
//
//   - ResourceNotFoundException
//     The specified resource doesn't exist.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials
func (c *SSO) GetRoleCredentials(input *GetRoleCredentialsInput) (*GetRoleCredentialsOutput, error) {

@@ -122,14 +122,13 @@ const opListAccountRoles = "ListAccountRoles"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//    // Example sending a request using the ListAccountRolesRequest method.
//    req, resp := client.ListAccountRolesRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles
func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *request.Request, output *ListAccountRolesOutput) {

@@ -167,20 +166,21 @@ func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *reques
// API operation ListAccountRoles for usage and error information.
//
// Returned Error Types: InvalidRequestException, UnauthorizedException,
// TooManyRequestsException, ResourceNotFoundException (same descriptions as
// for GetRoleCredentials above).
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles
func (c *SSO) ListAccountRoles(input *ListAccountRolesInput) (*ListAccountRolesOutput, error) {

@@ -212,15 +212,14 @@ func (c *SSO) ListAccountRolesWithContext(ctx aws.Context, input *ListAccountRol
//
// Note: This operation can generate multiple requests to a service.
//
//    // Example iterating over at most 3 pages of a ListAccountRoles operation.
//    pageNum := 0
//    err := client.ListAccountRolesPages(params,
//        func(page *sso.ListAccountRolesOutput, lastPage bool) bool {
//            pageNum++
//            fmt.Println(page)
//            return pageNum <= 3
//        })
func (c *SSO) ListAccountRolesPages(input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool) error {
	return c.ListAccountRolesPagesWithContext(aws.BackgroundContext(), input, fn)
}

@@ -272,14 +271,13 @@ const opListAccounts = "ListAccounts"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//    // Example sending a request using the ListAccountsRequest method.
//    req, resp := client.ListAccountsRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts
func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Request, output *ListAccountsOutput) {

@@ -310,7 +308,8 @@ func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Reques
// Lists all AWS accounts assigned to the user. These AWS accounts are assigned
// by the administrator of the account. For more information, see Assign User
// Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers)
-// in the AWS SSO User Guide. This operation returns a paginated response.
+// in the IAM Identity Center User Guide. This operation returns a paginated
+// response.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about

@@ -320,20 +319,21 @@ func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Reques
// API operation ListAccounts for usage and error information.
//
// Returned Error Types: InvalidRequestException, UnauthorizedException,
// TooManyRequestsException, ResourceNotFoundException (same descriptions as
// for GetRoleCredentials above).
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts
func (c *SSO) ListAccounts(input *ListAccountsInput) (*ListAccountsOutput, error) {

@@ -365,15 +365,14 @@ func (c *SSO) ListAccountsWithContext(ctx aws.Context, input *ListAccountsInput,
//
// Note: This operation can generate multiple requests to a service.
//
//    // Example iterating over at most 3 pages of a ListAccounts operation.
//    pageNum := 0
//    err := client.ListAccountsPages(params,
//        func(page *sso.ListAccountsOutput, lastPage bool) bool {
//            pageNum++
//            fmt.Println(page)
//            return pageNum <= 3
//        })
func (c *SSO) ListAccountsPages(input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool) error {
	return c.ListAccountsPagesWithContext(aws.BackgroundContext(), input, fn)
}

@@ -425,14 +424,13 @@ const opLogout = "Logout"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//    // Example sending a request using the LogoutRequest method.
//    req, resp := client.LogoutRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout
func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *LogoutOutput) {

@@ -455,7 +453,21 @@ func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *L
// Logout API operation for AWS Single Sign-On.
//
-// Removes the client- and server-side session that is associated with the user.
+// Removes the locally stored SSO tokens from the client-side cache and sends
+// an API call to the IAM Identity Center service to invalidate the corresponding
+// server-side IAM Identity Center sign in session.
+//
+// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM
+// Identity Center sign in session is used to obtain an IAM session, as specified
+// in the corresponding IAM Identity Center permission set. More specifically,
+// IAM Identity Center assumes an IAM role in the target account on behalf of
+// the user, and the corresponding temporary AWS credentials are returned to
+// the client.
+//
+// After user logout, any existing IAM role sessions that were created by using
+// IAM Identity Center permission sets continue based on the duration configured
+// in the permission set. For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html)
+// in the IAM Identity Center User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about

@@ -465,17 +477,18 @@ func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *L
// API operation Logout for usage and error information.
//
// Returned Error Types: InvalidRequestException, UnauthorizedException,
// TooManyRequestsException (same descriptions as for GetRoleCredentials above).
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout
func (c *SSO) Logout(input *LogoutInput) (*LogoutOutput, error) {

@@ -554,7 +567,7 @@ type GetRoleCredentialsInput struct {
// The token issued by the CreateToken API call. For more information, see CreateToken
// (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
-// in the AWS SSO OIDC API Reference Guide.
+// in the IAM Identity Center OIDC API Reference Guide.
//
// AccessToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by GetRoleCredentialsInput's

@@ -730,7 +743,7 @@ type ListAccountRolesInput struct {
// The token issued by the CreateToken API call. For more information, see CreateToken
// (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
-// in the AWS SSO OIDC API Reference Guide.
+// in the IAM Identity Center OIDC API Reference Guide.
//
// AccessToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by ListAccountRolesInput's

@@ -859,7 +872,7 @@ type ListAccountsInput struct {
// The token issued by the CreateToken API call. For more information, see CreateToken
// (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
-// in the AWS SSO OIDC API Reference Guide.
+// in the IAM Identity Center OIDC API Reference Guide.
//
// AccessToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by ListAccountsInput's

@@ -974,7 +987,7 @@ type LogoutInput struct {
// The token issued by the CreateToken API call. For more information, see CreateToken
// (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
-// in the AWS SSO OIDC API Reference Guide.
+// in the IAM Identity Center OIDC API Reference Guide.
//
// AccessToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by LogoutInput's
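The sso/api.go hunks above document GetRoleCredentials and its error types. For orientation, a minimal sketch of calling it follows (an editorial illustration, not part of the diff; region, account, role, and token values are placeholders, and the access token would come from the SSO OIDC CreateToken flow).

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sso"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := sso.New(sess)

	// Placeholder inputs; a real AccessToken comes from the OIDC CreateToken call.
	out, err := svc.GetRoleCredentials(&sso.GetRoleCredentialsInput{
		AccessToken: aws.String("<access token from CreateToken>"),
		AccountId:   aws.String("012345678901"),
		RoleName:    aws.String("MyRole"),
	})
	if err != nil {
		log.Fatalf("GetRoleCredentials failed: %v", err)
	}
	fmt.Println("credentials expire at (epoch ms):", aws.Int64Value(out.RoleCredentials.Expiration))
}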
27	vendor/github.com/aws/aws-sdk-go/service/sso/doc.go generated vendored

@@ -3,30 +3,31 @@
// Package sso provides the client and types for making API
// requests to AWS Single Sign-On.
//
-// AWS Single Sign-On Portal is a web service that makes it easy for you to
-// assign user access to AWS SSO resources such as the user portal. Users can
-// get AWS account applications and roles assigned to them and get federated
-// into the application.
+// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web
+// service that makes it easy for you to assign user access to IAM Identity
+// Center resources such as the AWS access portal. Users can get AWS account
+// applications and roles assigned to them and get federated into the application.
//
-// For general information about AWS SSO, see What is AWS Single Sign-On? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
-// in the AWS SSO User Guide.
+// Although AWS Single Sign-On was renamed, the sso and identitystore API namespaces
+// will continue to retain their original name for backward compatibility purposes.
+// For more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed).
//
-// This API reference guide describes the AWS SSO Portal operations that you
-// can call programatically and includes detailed information on data types
-// and errors.
+// This reference guide describes the IAM Identity Center Portal operations
+// that you can call programatically and includes detailed information on data
+// types and errors.
//
// AWS provides SDKs that consist of libraries and sample code for various programming
// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs
-// provide a convenient way to create programmatic access to AWS SSO and other
-// AWS services. For more information about the AWS SDKs, including how to download
-// and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
+// provide a convenient way to create programmatic access to IAM Identity Center
+// and other AWS services. For more information about the AWS SDKs, including
+// how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
//
// See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service.
//
// See sso package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/
//
-// Using the Client
+// # Using the Client
//
// To contact AWS Single Sign-On with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.
11	vendor/github.com/aws/aws-sdk-go/service/sso/service.go generated vendored

@@ -40,13 +40,14 @@ const (
// aws.Config parameter to add your extra config.
//
// Example:
//
//    mySession := session.Must(session.NewSession())
//
//    // Create a SSO client from just a session.
//    svc := sso.New(mySession)
//
//    // Create a SSO client with additional configuration
//    svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO {
	c := p.ClientConfig(EndpointsID, cfgs...)
	if c.SigningNameDerived || len(c.SigningName) == 0 {
46	vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go generated vendored

@@ -23,37 +23,37 @@
// can be stubbed out for unit testing your code with the SDK without needing
// to inject custom request handlers into the SDK's request pipeline.
//
//    // myFunc uses an SDK service client to make a request to
//    // AWS Single Sign-On.
//    func myFunc(svc ssoiface.SSOAPI) bool {
//        // Make svc.GetRoleCredentials request
//    }
//
//    func main() {
//        sess := session.New()
//        svc := sso.New(sess)
//
//        myFunc(svc)
//    }
//
// In your _test.go file:
//
//    // Define a mock struct to be used in your unit tests of myFunc.
//    type mockSSOClient struct {
//        ssoiface.SSOAPI
//    }
//    func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) {
//        // mock response/functionality
//    }
//
//    func TestMyFunc(t *testing.T) {
//        // Setup Test
//        mockSvc := &mockSSOClient{}
//
//        myfunc(mockSvc)
//
//        // Verify myFunc's functionality
//    }
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
484
vendor/github.com/aws/aws-sdk-go/service/sts/api.go
generated
vendored
484
vendor/github.com/aws/aws-sdk-go/service/sts/api.go
generated
vendored
|
@ -28,14 +28,13 @@ const opAssumeRole = "AssumeRole"
|
|||
// This method is useful when you want to inject custom logic or configuration
|
||||
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
||||
//
|
||||
// // Example sending a request using the AssumeRoleRequest method.
|
||||
// req, resp := client.AssumeRoleRequest(params)
|
||||
//
|
||||
// // Example sending a request using the AssumeRoleRequest method.
|
||||
// req, resp := client.AssumeRoleRequest(params)
|
||||
//
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
|
||||
func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
|
||||
|
@ -66,7 +65,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
|
|||
// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// Permissions
|
||||
// # Permissions
|
||||
//
|
||||
// The temporary security credentials created by AssumeRole can be used to make
|
||||
// API calls to any Amazon Web Services service with the following exception:
|
||||
|
@ -105,10 +104,10 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
|
|||
// To allow a user to assume a role in the same account, you can do either of
|
||||
// the following:
|
||||
//
|
||||
// * Attach a policy to the user that allows the user to call AssumeRole
|
||||
// (as long as the role's trust policy trusts the account).
|
||||
// - Attach a policy to the user that allows the user to call AssumeRole
|
||||
// (as long as the role's trust policy trusts the account).
|
||||
//
|
||||
// * Add the user as a principal directly in the role's trust policy.
|
||||
// - Add the user as a principal directly in the role's trust policy.
|
||||
//
|
||||
// You can do either because the role’s trust policy acts as an IAM resource-based
|
||||
// policy. When a resource-based policy grants access to a principal in the
|
||||
|
@ -116,7 +115,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
|
|||
// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// Tags
|
||||
// # Tags
|
||||
//
|
||||
// (Optional) You can pass tag key-value pairs to your session. These tags are
|
||||
// called session tags. For more information about session tags, see Passing
|
||||
|
@ -134,7 +133,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
|
|||
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// Using MFA with AssumeRole
|
||||
// # Using MFA with AssumeRole
|
||||
//
|
||||
// (Optional) You can include multi-factor authentication (MFA) information
|
||||
// when you call AssumeRole. This is useful for cross-account scenarios to ensure
|
||||
|
@ -163,35 +162,36 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
|
|||
// API operation AssumeRole for usage and error information.
|
||||
//
|
||||
// Returned Error Codes:
|
||||
// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
|
||||
// The request was rejected because the policy document was malformed. The error
|
||||
// message describes the specific error.
|
||||
//
|
||||
// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
|
||||
// The request was rejected because the total packed size of the session policies
|
||||
// and session tags combined was too large. An Amazon Web Services conversion
|
||||
// compresses the session policy document, session policy ARNs, and session
|
||||
// tags into a packed binary format that has a separate limit. The error message
|
||||
// indicates by percentage how close the policies and tags are to the upper
|
||||
// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
|
||||
// in the IAM User Guide.
|
||||
// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
|
||||
// The request was rejected because the policy document was malformed. The error
|
||||
// message describes the specific error.
|
||||
//
|
||||
// You could receive this error even though you meet other defined session policy
|
||||
// and session tag limits. For more information, see IAM and STS Entity Character
|
||||
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
|
||||
// in the IAM User Guide.
|
||||
// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
|
||||
// The request was rejected because the total packed size of the session policies
|
||||
// and session tags combined was too large. An Amazon Web Services conversion
|
||||
// compresses the session policy document, session policy ARNs, and session
|
||||
// tags into a packed binary format that has a separate limit. The error message
|
||||
// indicates by percentage how close the policies and tags are to the upper
|
||||
// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// * ErrCodeRegionDisabledException "RegionDisabledException"
|
||||
// STS is not activated in the requested region for the account that is being
|
||||
// asked to generate credentials. The account administrator must use the IAM
|
||||
// console to activate STS in that region. For more information, see Activating
|
||||
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
|
||||
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
|
||||
// in the IAM User Guide.
|
||||
// You could receive this error even though you meet other defined session policy
|
||||
// and session tag limits. For more information, see IAM and STS Entity Character
|
||||
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// * ErrCodeExpiredTokenException "ExpiredTokenException"
|
||||
// The web identity token that was passed is expired or is not valid. Get a
|
||||
// new identity token from the identity provider and then retry the request.
|
||||
// - ErrCodeRegionDisabledException "RegionDisabledException"
|
||||
// STS is not activated in the requested region for the account that is being
|
||||
// asked to generate credentials. The account administrator must use the IAM
|
||||
// console to activate STS in that region. For more information, see Activating
|
||||
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
|
||||
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// - ErrCodeExpiredTokenException "ExpiredTokenException"
|
||||
// The web identity token that was passed is expired or is not valid. Get a
|
||||
// new identity token from the identity provider and then retry the request.
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
|
||||
func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
|
||||
|
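For reference, the error codes listed above for AssumeRole can be matched with a runtime type assertion on awserr.Error, as the comments describe. The following is a minimal sketch, not part of the vendored diff: the role ARN, session name, and duration are placeholder values, and it assumes AWS credentials are already configured in the environment.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Create an STS client from a shared session.
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	// Assume a role for one hour; the ARN and session name are placeholders.
	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"),
		RoleSessionName: aws.String("example-session"),
		DurationSeconds: aws.Int64(3600),
	})
	if err != nil {
		// Match on the error codes documented above.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case sts.ErrCodeMalformedPolicyDocumentException,
				sts.ErrCodePackedPolicyTooLargeException,
				sts.ErrCodeRegionDisabledException,
				sts.ErrCodeExpiredTokenException:
				log.Fatalf("AssumeRole failed: %s: %s", aerr.Code(), aerr.Message())
			}
		}
		log.Fatal(err)
	}
	fmt.Println(*out.Credentials.AccessKeyId)
}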
@ -231,14 +231,13 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
|
|||
// This method is useful when you want to inject custom logic or configuration
|
||||
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
||||
//
|
||||
// // Example sending a request using the AssumeRoleWithSAMLRequest method.
|
||||
// req, resp := client.AssumeRoleWithSAMLRequest(params)
|
||||
//
|
||||
// // Example sending a request using the AssumeRoleWithSAMLRequest method.
|
||||
// req, resp := client.AssumeRoleWithSAMLRequest(params)
|
||||
//
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
|
||||
func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
|
||||
|
@ -274,7 +273,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
|
|||
// can use these temporary security credentials to sign calls to Amazon Web
|
||||
// Services services.
|
||||
//
|
||||
// Session Duration
|
||||
// # Session Duration
|
||||
//
|
||||
// By default, the temporary security credentials created by AssumeRoleWithSAML
|
||||
// last for one hour. However, you can use the optional DurationSeconds parameter
|
||||
|
@ -300,7 +299,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
|
|||
// a role using role chaining and provide a DurationSeconds parameter value
|
||||
// greater than one hour, the operation fails.
|
||||
//
|
||||
// Permissions
|
||||
// # Permissions
|
||||
//
|
||||
// The temporary security credentials created by AssumeRoleWithSAML can be used
|
||||
// to make API calls to any Amazon Web Services service with the following exception:
|
||||
|
@ -331,7 +330,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
|
|||
// identifiable information (PII). For example, you could instead use the persistent
|
||||
// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
|
||||
//
|
||||
// Tags
|
||||
// # Tags
|
||||
//
|
||||
// (Optional) You can configure your IdP to pass attributes into your SAML assertion
|
||||
// as session tags. Each session tag consists of a key name and an associated
|
||||
|
@ -365,7 +364,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
|
|||
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// SAML Configuration
|
||||
// # SAML Configuration
|
||||
//
|
||||
// Before your application can call AssumeRoleWithSAML, you must configure your
|
||||
// SAML identity provider (IdP) to issue the claims required by Amazon Web Services.
|
||||
|
@ -376,17 +375,17 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
|
|||
//
|
||||
// For more information, see the following resources:
|
||||
//
|
||||
// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
|
||||
// in the IAM User Guide.
|
||||
// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
|
||||
// in the IAM User Guide.
|
||||
// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
|
||||
// in the IAM User Guide.
|
||||
// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
|
||||
// in the IAM User Guide.
|
||||
// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
|
@ -396,47 +395,48 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
|
|||
// API operation AssumeRoleWithSAML for usage and error information.
|
||||
//
|
||||
// Returned Error Codes:
|
||||
// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
|
||||
// The request was rejected because the policy document was malformed. The error
|
||||
// message describes the specific error.
|
||||
//
|
||||
// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
|
||||
// The request was rejected because the total packed size of the session policies
|
||||
// and session tags combined was too large. An Amazon Web Services conversion
|
||||
// compresses the session policy document, session policy ARNs, and session
|
||||
// tags into a packed binary format that has a separate limit. The error message
|
||||
// indicates by percentage how close the policies and tags are to the upper
|
||||
// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
|
||||
// in the IAM User Guide.
|
||||
// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
|
||||
// The request was rejected because the policy document was malformed. The error
|
||||
// message describes the specific error.
|
||||
//
|
||||
// You could receive this error even though you meet other defined session policy
|
||||
// and session tag limits. For more information, see IAM and STS Entity Character
|
||||
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
|
||||
// in the IAM User Guide.
|
||||
// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
|
||||
// The request was rejected because the total packed size of the session policies
|
||||
// and session tags combined was too large. An Amazon Web Services conversion
|
||||
// compresses the session policy document, session policy ARNs, and session
|
||||
// tags into a packed binary format that has a separate limit. The error message
|
||||
// indicates by percentage how close the policies and tags are to the upper
|
||||
// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
|
||||
// The identity provider (IdP) reported that authentication failed. This might
|
||||
// be because the claim is invalid.
|
||||
// You could receive this error even though you meet other defined session policy
|
||||
// and session tag limits. For more information, see IAM and STS Entity Character
|
||||
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// If this error is returned for the AssumeRoleWithWebIdentity operation, it
|
||||
// can also mean that the claim has expired or has been explicitly revoked.
|
||||
// - ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
|
||||
// The identity provider (IdP) reported that authentication failed. This might
|
||||
// be because the claim is invalid.
|
||||
//
|
||||
// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
|
||||
// The web identity token that was passed could not be validated by Amazon Web
|
||||
// Services. Get a new identity token from the identity provider and then retry
|
||||
// the request.
|
||||
// If this error is returned for the AssumeRoleWithWebIdentity operation, it
|
||||
// can also mean that the claim has expired or has been explicitly revoked.
|
||||
//
|
||||
// * ErrCodeExpiredTokenException "ExpiredTokenException"
|
||||
// The web identity token that was passed is expired or is not valid. Get a
|
||||
// new identity token from the identity provider and then retry the request.
|
||||
// - ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
|
||||
// The web identity token that was passed could not be validated by Amazon Web
|
||||
// Services. Get a new identity token from the identity provider and then retry
|
||||
// the request.
|
||||
//
|
||||
// * ErrCodeRegionDisabledException "RegionDisabledException"
|
||||
// STS is not activated in the requested region for the account that is being
|
||||
// asked to generate credentials. The account administrator must use the IAM
|
||||
// console to activate STS in that region. For more information, see Activating
|
||||
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
|
||||
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
|
||||
// in the IAM User Guide.
|
||||
// - ErrCodeExpiredTokenException "ExpiredTokenException"
|
||||
// The web identity token that was passed is expired or is not valid. Get a
|
||||
// new identity token from the identity provider and then retry the request.
|
||||
//
|
||||
// - ErrCodeRegionDisabledException "RegionDisabledException"
|
||||
// STS is not activated in the requested region for the account that is being
|
||||
// asked to generate credentials. The account administrator must use the IAM
|
||||
// console to activate STS in that region. For more information, see Activating
|
||||
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
|
||||
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
|
||||
func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
|
||||
|
@ -476,14 +476,13 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
|
|||
// This method is useful when you want to inject custom logic or configuration
|
||||
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
||||
//
|
||||
// // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
|
||||
// req, resp := client.AssumeRoleWithWebIdentityRequest(params)
|
||||
//
|
||||
// // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
|
||||
// req, resp := client.AssumeRoleWithWebIdentityRequest(params)
|
||||
//
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
|
||||
func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
|
||||
|
@ -540,7 +539,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
|
|||
// temporary security credentials to sign calls to Amazon Web Services service
|
||||
// API operations.
|
||||
//
|
||||
// Session Duration
|
||||
// # Session Duration
|
||||
//
|
||||
// By default, the temporary security credentials created by AssumeRoleWithWebIdentity
|
||||
// last for one hour. However, you can use the optional DurationSeconds parameter
|
||||
|
@ -555,7 +554,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
|
|||
// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// Permissions
|
||||
// # Permissions
|
||||
//
|
||||
// The temporary security credentials created by AssumeRoleWithWebIdentity can
|
||||
// be used to make API calls to any Amazon Web Services service with the following
|
||||
|
@ -576,7 +575,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
|
|||
// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// Tags
|
||||
// # Tags
|
||||
//
|
||||
// (Optional) You can configure your IdP to pass attributes into your web identity
|
||||
// token as session tags. Each session tag consists of a key name and an associated
|
||||
|
@ -610,7 +609,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
|
|||
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// Identities
|
||||
// # Identities
|
||||
//
|
||||
// Before your application can call AssumeRoleWithWebIdentity, you must have
|
||||
// an identity token from a supported identity provider and create a role that
|
||||
|
@ -628,24 +627,24 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
|
|||
// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
|
||||
// API, see the following resources:
|
||||
//
|
||||
// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
|
||||
// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
|
||||
// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
|
||||
// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
|
||||
//
|
||||
// * Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/).
|
||||
// Walk through the process of authenticating through Login with Amazon,
|
||||
// Facebook, or Google, getting temporary security credentials, and then
|
||||
// using those credentials to make a request to Amazon Web Services.
|
||||
// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/).
|
||||
// Walk through the process of authenticating through Login with Amazon,
|
||||
// Facebook, or Google, getting temporary security credentials, and then
|
||||
// using those credentials to make a request to Amazon Web Services.
|
||||
//
|
||||
// * Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/)
|
||||
// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/).
|
||||
// These toolkits contain sample apps that show how to invoke the identity
|
||||
// providers. The toolkits then show how to use the information from these
|
||||
// providers to get and use temporary security credentials.
|
||||
// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/)
|
||||
// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/).
|
||||
// These toolkits contain sample apps that show how to invoke the identity
|
||||
// providers. The toolkits then show how to use the information from these
|
||||
// providers to get and use temporary security credentials.
|
||||
//
|
||||
// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
|
||||
// This article discusses web identity federation and shows an example of
|
||||
// how to use web identity federation to get access to content in Amazon
|
||||
// S3.
|
||||
// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
|
||||
// This article discusses web identity federation and shows an example of
|
||||
// how to use web identity federation to get access to content in Amazon
|
||||
// S3.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
|
@ -655,54 +654,55 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
|
|||
// API operation AssumeRoleWithWebIdentity for usage and error information.
|
||||
//
|
||||
// Returned Error Codes:
|
||||
// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
|
||||
// The request was rejected because the policy document was malformed. The error
|
||||
// message describes the specific error.
|
||||
//
|
||||
// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
|
||||
// The request was rejected because the total packed size of the session policies
|
||||
// and session tags combined was too large. An Amazon Web Services conversion
|
||||
// compresses the session policy document, session policy ARNs, and session
|
||||
// tags into a packed binary format that has a separate limit. The error message
|
||||
// indicates by percentage how close the policies and tags are to the upper
|
||||
// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
|
||||
// in the IAM User Guide.
|
||||
// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
|
||||
// The request was rejected because the policy document was malformed. The error
|
||||
// message describes the specific error.
|
||||
//
|
||||
// You could receive this error even though you meet other defined session policy
|
||||
// and session tag limits. For more information, see IAM and STS Entity Character
|
||||
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
|
||||
// in the IAM User Guide.
|
||||
// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
|
||||
// The request was rejected because the total packed size of the session policies
|
||||
// and session tags combined was too large. An Amazon Web Services conversion
|
||||
// compresses the session policy document, session policy ARNs, and session
|
||||
// tags into a packed binary format that has a separate limit. The error message
|
||||
// indicates by percentage how close the policies and tags are to the upper
|
||||
// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
|
||||
// The identity provider (IdP) reported that authentication failed. This might
|
||||
// be because the claim is invalid.
|
||||
// You could receive this error even though you meet other defined session policy
|
||||
// and session tag limits. For more information, see IAM and STS Entity Character
|
||||
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// If this error is returned for the AssumeRoleWithWebIdentity operation, it
|
||||
// can also mean that the claim has expired or has been explicitly revoked.
|
||||
// - ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
|
||||
// The identity provider (IdP) reported that authentication failed. This might
|
||||
// be because the claim is invalid.
|
||||
//
|
||||
// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError"
|
||||
// The request could not be fulfilled because the identity provider (IDP) that
|
||||
// was asked to verify the incoming identity token could not be reached. This
|
||||
// is often a transient error caused by network conditions. Retry the request
|
||||
// a limited number of times so that you don't exceed the request rate. If the
|
||||
// error persists, the identity provider might be down or not responding.
|
||||
// If this error is returned for the AssumeRoleWithWebIdentity operation, it
|
||||
// can also mean that the claim has expired or has been explicitly revoked.
|
||||
//
|
||||
// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
|
||||
// The web identity token that was passed could not be validated by Amazon Web
|
||||
// Services. Get a new identity token from the identity provider and then retry
|
||||
// the request.
|
||||
// - ErrCodeIDPCommunicationErrorException "IDPCommunicationError"
|
||||
// The request could not be fulfilled because the identity provider (IDP) that
|
||||
// was asked to verify the incoming identity token could not be reached. This
|
||||
// is often a transient error caused by network conditions. Retry the request
|
||||
// a limited number of times so that you don't exceed the request rate. If the
|
||||
// error persists, the identity provider might be down or not responding.
|
||||
//
|
||||
// * ErrCodeExpiredTokenException "ExpiredTokenException"
|
||||
// The web identity token that was passed is expired or is not valid. Get a
|
||||
// new identity token from the identity provider and then retry the request.
|
||||
// - ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
|
||||
// The web identity token that was passed could not be validated by Amazon Web
|
||||
// Services. Get a new identity token from the identity provider and then retry
|
||||
// the request.
|
||||
//
|
||||
// * ErrCodeRegionDisabledException "RegionDisabledException"
|
||||
// STS is not activated in the requested region for the account that is being
|
||||
// asked to generate credentials. The account administrator must use the IAM
|
||||
// console to activate STS in that region. For more information, see Activating
|
||||
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
|
||||
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
|
||||
// in the IAM User Guide.
|
||||
// - ErrCodeExpiredTokenException "ExpiredTokenException"
|
||||
// The web identity token that was passed is expired or is not valid. Get a
|
||||
// new identity token from the identity provider and then retry the request.
|
||||
//
|
||||
// - ErrCodeRegionDisabledException "RegionDisabledException"
|
||||
// STS is not activated in the requested region for the account that is being
|
||||
// asked to generate credentials. The account administrator must use the IAM
|
||||
// console to activate STS in that region. For more information, see Activating
|
||||
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
|
||||
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
|
||||
func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
|
||||
|
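As a hedged sketch of the AssumeRoleWithWebIdentity flow described above (not part of the vendored diff): the function name is hypothetical, the role ARN and session name are placeholders, and the aws and sts imports are the same as in the AssumeRole sketch earlier.

// assumeWithOIDC exchanges a web identity (OIDC) token for temporary
// credentials. The role ARN and session name are placeholders.
func assumeWithOIDC(svc *sts.STS, rawIDToken string) (*sts.Credentials, error) {
	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-identity-role"),
		RoleSessionName:  aws.String("app-session"),
		WebIdentityToken: aws.String(rawIDToken),
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}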
@ -742,14 +742,13 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
|
|||
// This method is useful when you want to inject custom logic or configuration
|
||||
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
||||
//
|
||||
// // Example sending a request using the DecodeAuthorizationMessageRequest method.
|
||||
// req, resp := client.DecodeAuthorizationMessageRequest(params)
|
||||
//
|
||||
// // Example sending a request using the DecodeAuthorizationMessageRequest method.
|
||||
// req, resp := client.DecodeAuthorizationMessageRequest(params)
|
||||
//
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
|
||||
func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
|
||||
|
@ -793,18 +792,18 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
|
|||
//
|
||||
// The decoded message includes the following type of information:
|
||||
//
|
||||
// * Whether the request was denied due to an explicit deny or due to the
|
||||
// absence of an explicit allow. For more information, see Determining Whether
|
||||
// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
|
||||
// in the IAM User Guide.
|
||||
// - Whether the request was denied due to an explicit deny or due to the
|
||||
// absence of an explicit allow. For more information, see Determining Whether
|
||||
// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// * The principal who made the request.
|
||||
// - The principal who made the request.
|
||||
//
|
||||
// * The requested action.
|
||||
// - The requested action.
|
||||
//
|
||||
// * The requested resource.
|
||||
// - The requested resource.
|
||||
//
|
||||
// * The values of condition keys in the context of the user's request.
|
||||
// - The values of condition keys in the context of the user's request.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
|
@ -814,10 +813,10 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
|
|||
// API operation DecodeAuthorizationMessage for usage and error information.
|
||||
//
|
||||
// Returned Error Codes:
|
||||
// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
|
||||
// The error returned if the message passed to DecodeAuthorizationMessage was
|
||||
// invalid. This can happen if the token contains invalid characters, such as
|
||||
// linebreaks.
|
||||
// - ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
|
||||
// The error returned if the message passed to DecodeAuthorizationMessage was
|
||||
// invalid. This can happen if the token contains invalid characters, such as
|
||||
// linebreaks.
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
|
||||
func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
|
||||
|
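A minimal sketch of decoding an authorization failure message as documented above (not part of the vendored diff); the function name is hypothetical and the encoded message would normally come from an AccessDenied error returned by another service call.

// decodeDenied expands an encoded authorization failure message into the
// readable form described above.
func decodeDenied(svc *sts.STS, encoded string) (string, error) {
	out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String(encoded),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.DecodedMessage), nil
}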
@ -857,14 +856,13 @@ const opGetAccessKeyInfo = "GetAccessKeyInfo"
|
|||
// This method is useful when you want to inject custom logic or configuration
|
||||
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
||||
//
|
||||
// // Example sending a request using the GetAccessKeyInfoRequest method.
|
||||
// req, resp := client.GetAccessKeyInfoRequest(params)
|
||||
//
|
||||
// // Example sending a request using the GetAccessKeyInfoRequest method.
|
||||
// req, resp := client.GetAccessKeyInfoRequest(params)
|
||||
//
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
|
||||
func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) {
|
||||
|
@ -954,14 +952,13 @@ const opGetCallerIdentity = "GetCallerIdentity"
|
|||
// This method is useful when you want to inject custom logic or configuration
|
||||
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
||||
//
|
||||
// // Example sending a request using the GetCallerIdentityRequest method.
|
||||
// req, resp := client.GetCallerIdentityRequest(params)
|
||||
//
|
||||
// // Example sending a request using the GetCallerIdentityRequest method.
|
||||
// req, resp := client.GetCallerIdentityRequest(params)
|
||||
//
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
|
||||
func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
|
||||
|
@ -1037,14 +1034,13 @@ const opGetFederationToken = "GetFederationToken"
|
|||
// This method is useful when you want to inject custom logic or configuration
|
||||
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
||||
//
|
||||
// // Example sending a request using the GetFederationTokenRequest method.
|
||||
// req, resp := client.GetFederationTokenRequest(params)
|
||||
//
|
||||
// // Example sending a request using the GetFederationTokenRequest method.
|
||||
// req, resp := client.GetFederationTokenRequest(params)
|
||||
//
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
|
||||
func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
|
||||
|
@ -1094,7 +1090,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
|
|||
// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// Session duration
|
||||
// # Session duration
|
||||
//
|
||||
// The temporary credentials are valid for the specified duration, from 900
|
||||
// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default
|
||||
|
@ -1102,15 +1098,15 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
|
|||
// by using the Amazon Web Services account root user credentials have a maximum
|
||||
// duration of 3,600 seconds (1 hour).
|
||||
//
|
||||
// Permissions
|
||||
// # Permissions
|
||||
//
|
||||
// You can use the temporary credentials created by GetFederationToken in any
|
||||
// Amazon Web Services service except the following:
|
||||
//
|
||||
// * You cannot call any IAM operations using the CLI or the Amazon Web Services
|
||||
// API.
|
||||
// - You cannot call any IAM operations using the CLI or the Amazon Web Services
|
||||
// API.
|
||||
//
|
||||
// * You cannot call any STS operations except GetCallerIdentity.
|
||||
// - You cannot call any STS operations except GetCallerIdentity.
|
||||
//
|
||||
// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||
// to this operation. You can pass a single JSON policy document to use as an
|
||||
|
@ -1136,7 +1132,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
|
|||
// by the policy. These permissions are granted in addition to the permissions
|
||||
// granted by the session policies.
|
||||
//
|
||||
// Tags
|
||||
// # Tags
|
||||
//
|
||||
// (Optional) You can pass tag key-value pairs to your session. These are called
|
||||
// session tags. For more information about session tags, see Passing Session
|
||||
|
@ -1172,31 +1168,32 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
|
|||
// API operation GetFederationToken for usage and error information.
|
||||
//
|
||||
// Returned Error Codes:
|
||||
// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
|
||||
// The request was rejected because the policy document was malformed. The error
|
||||
// message describes the specific error.
|
||||
//
|
||||
// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
|
||||
// The request was rejected because the total packed size of the session policies
|
||||
// and session tags combined was too large. An Amazon Web Services conversion
|
||||
// compresses the session policy document, session policy ARNs, and session
|
||||
// tags into a packed binary format that has a separate limit. The error message
|
||||
// indicates by percentage how close the policies and tags are to the upper
|
||||
// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
|
||||
// in the IAM User Guide.
|
||||
// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
|
||||
// The request was rejected because the policy document was malformed. The error
|
||||
// message describes the specific error.
|
||||
//
|
||||
// You could receive this error even though you meet other defined session policy
|
||||
// and session tag limits. For more information, see IAM and STS Entity Character
|
||||
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
|
||||
// in the IAM User Guide.
|
||||
// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
|
||||
// The request was rejected because the total packed size of the session policies
|
||||
// and session tags combined was too large. An Amazon Web Services conversion
|
||||
// compresses the session policy document, session policy ARNs, and session
|
||||
// tags into a packed binary format that has a separate limit. The error message
|
||||
// indicates by percentage how close the policies and tags are to the upper
|
||||
// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// * ErrCodeRegionDisabledException "RegionDisabledException"
|
||||
// STS is not activated in the requested region for the account that is being
|
||||
// asked to generate credentials. The account administrator must use the IAM
|
||||
// console to activate STS in that region. For more information, see Activating
|
||||
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
|
||||
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
|
||||
// in the IAM User Guide.
|
||||
// You could receive this error even though you meet other defined session policy
|
||||
// and session tag limits. For more information, see IAM and STS Entity Character
|
||||
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// - ErrCodeRegionDisabledException "RegionDisabledException"
|
||||
// STS is not activated in the requested region for the account that is being
|
||||
// asked to generate credentials. The account administrator must use the IAM
|
||||
// console to activate STS in that region. For more information, see Activating
|
||||
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
|
||||
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
|
||||
func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
|
||||
|
@ -1236,14 +1233,13 @@ const opGetSessionToken = "GetSessionToken"
|
|||
// This method is useful when you want to inject custom logic or configuration
|
||||
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
||||
//
|
||||
// // Example sending a request using the GetSessionTokenRequest method.
|
||||
// req, resp := client.GetSessionTokenRequest(params)
|
||||
//
|
||||
// // Example sending a request using the GetSessionTokenRequest method.
|
||||
// req, resp := client.GetSessionTokenRequest(params)
|
||||
//
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
|
||||
func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
|
||||
|
@ -1285,7 +1281,7 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
|
|||
// see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// Session Duration
|
||||
// # Session Duration
|
||||
//
|
||||
// The GetSessionToken operation must be called by using the long-term Amazon
|
||||
// Web Services security credentials of the Amazon Web Services account root
|
||||
|
@ -1296,15 +1292,15 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
|
|||
// range from 900 seconds (15 minutes) up to 3,600 seconds (1 hour), with a
|
||||
// default of 1 hour.
|
||||
//
|
||||
// Permissions
|
||||
// # Permissions
|
||||
//
|
||||
// The temporary security credentials created by GetSessionToken can be used
|
||||
// to make API calls to any Amazon Web Services service with the following exceptions:
|
||||
//
|
||||
// * You cannot call any IAM API operations unless MFA authentication information
|
||||
// is included in the request.
|
||||
// - You cannot call any IAM API operations unless MFA authentication information
|
||||
// is included in the request.
|
||||
//
|
||||
// * You cannot call any STS API except AssumeRole or GetCallerIdentity.
|
||||
// - You cannot call any STS API except AssumeRole or GetCallerIdentity.
|
||||
//
|
||||
// We recommend that you do not call GetSessionToken with Amazon Web Services
|
||||
// account root user credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
|
||||
|
@ -1330,13 +1326,13 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
|
|||
// API operation GetSessionToken for usage and error information.
|
||||
//
|
||||
// Returned Error Codes:
|
||||
// * ErrCodeRegionDisabledException "RegionDisabledException"
|
||||
// STS is not activated in the requested region for the account that is being
|
||||
// asked to generate credentials. The account administrator must use the IAM
|
||||
// console to activate STS in that region. For more information, see Activating
|
||||
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
|
||||
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
|
||||
// in the IAM User Guide.
|
||||
// - ErrCodeRegionDisabledException "RegionDisabledException"
|
||||
// STS is not activated in the requested region for the account that is being
|
||||
// asked to generate credentials. The account administrator must use the IAM
|
||||
// console to activate STS in that region. For more information, see Activating
|
||||
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
|
||||
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
|
||||
func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
|
||||
|
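A minimal sketch of GetSessionToken with MFA, which the comments above note is required before the resulting credentials can call IAM operations (not part of the vendored diff); the function name is hypothetical and the MFA serial number and token code are caller-supplied placeholders.

// sessionWithMFA requests a short-lived session token using MFA.
func sessionWithMFA(svc *sts.STS, mfaSerial, tokenCode string) (*sts.Credentials, error) {
	out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
		DurationSeconds: aws.Int64(900), // minimum allowed duration
		SerialNumber:    aws.String(mfaSerial),
		TokenCode:       aws.String(tokenCode),
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}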
|
2
vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
generated
vendored
|
@ -14,7 +14,7 @@
|
|||
// See sts package documentation for more information.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/
|
||||
//
|
||||
// Using the Client
|
||||
// # Using the Client
|
||||
//
|
||||
// To contact AWS Security Token Service with the SDK use the New function to create
|
||||
// a new service client. With that client you can make API requests to the service.
|
||||
|
|
11
vendor/github.com/aws/aws-sdk-go/service/sts/service.go
generated
vendored
11
vendor/github.com/aws/aws-sdk-go/service/sts/service.go
generated
vendored
|
@ -39,13 +39,14 @@ const (
|
|||
// aws.Config parameter to add your extra config.
|
||||
//
|
||||
// Example:
|
||||
// mySession := session.Must(session.NewSession())
|
||||
//
|
||||
// // Create a STS client from just a session.
|
||||
// svc := sts.New(mySession)
|
||||
// mySession := session.Must(session.NewSession())
|
||||
//
|
||||
// // Create a STS client with additional configuration
|
||||
// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
|
||||
// // Create a STS client from just a session.
|
||||
// svc := sts.New(mySession)
|
||||
//
|
||||
// // Create a STS client with additional configuration
|
||||
// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
|
||||
func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
|
||||
c := p.ClientConfig(EndpointsID, cfgs...)
|
||||
if c.SigningNameDerived || len(c.SigningName) == 0 {
|
||||
|
|
46
vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
generated
vendored
|
@ -23,37 +23,37 @@ import (
|
|||
// can be stubbed out for unit testing your code with the SDK without needing
|
||||
// to inject custom request handlers into the SDK's request pipeline.
|
||||
//
|
||||
// // myFunc uses an SDK service client to make a request to
|
||||
// // AWS Security Token Service.
|
||||
// func myFunc(svc stsiface.STSAPI) bool {
|
||||
// // Make svc.AssumeRole request
|
||||
// }
|
||||
// // myFunc uses an SDK service client to make a request to
|
||||
// // AWS Security Token Service.
|
||||
// func myFunc(svc stsiface.STSAPI) bool {
|
||||
// // Make svc.AssumeRole request
|
||||
// }
|
||||
//
|
||||
// func main() {
|
||||
// sess := session.New()
|
||||
// svc := sts.New(sess)
|
||||
// func main() {
|
||||
// sess := session.New()
|
||||
// svc := sts.New(sess)
|
||||
//
|
||||
// myFunc(svc)
|
||||
// }
|
||||
// myFunc(svc)
|
||||
// }
|
||||
//
|
||||
// In your _test.go file:
|
||||
//
|
||||
// // Define a mock struct to be used in your unit tests of myFunc.
|
||||
// type mockSTSClient struct {
|
||||
// stsiface.STSAPI
|
||||
// }
|
||||
// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
|
||||
// // mock response/functionality
|
||||
// }
|
||||
// // Define a mock struct to be used in your unit tests of myFunc.
|
||||
// type mockSTSClient struct {
|
||||
// stsiface.STSAPI
|
||||
// }
|
||||
// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
|
||||
// // mock response/functionality
|
||||
// }
|
||||
//
|
||||
// func TestMyFunc(t *testing.T) {
|
||||
// // Setup Test
|
||||
// mockSvc := &mockSTSClient{}
|
||||
// func TestMyFunc(t *testing.T) {
|
||||
// // Setup Test
|
||||
// mockSvc := &mockSTSClient{}
|
||||
//
|
||||
// myfunc(mockSvc)
|
||||
// myfunc(mockSvc)
|
||||
//
|
||||
// // Verify myFunc's functionality
|
||||
// }
|
||||
// // Verify myFunc's functionality
|
||||
// }
|
||||
//
|
||||
// It is important to note that this interface will have breaking changes
|
||||
// when the service model is updated and adds new API operations, paginators,
|
||||
|
|
Some files were not shown because too many files have changed in this diff.